Jekyll2000 committed on
Commit
df0afa1
·
verified ·
1 Parent(s): 15f9017

Create utils/interview_agent.py

Browse files
Files changed (1) hide show
  1. utils/interview_agent.py +150 -0
utils/interview_agent.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.llms import HuggingFaceHub
2
+ from langchain.chains import RetrievalQA
3
+ from langchain.embeddings import HuggingFaceEmbeddings
4
+ from langchain.vectorstores import FAISS
5
+ from langchain.document_loaders import TextLoader
6
+ from langchain.text_splitter import CharacterTextSplitter
7
+ import os
8
+
9
class InterviewAgent:
    """Mock-interview driver for a given job role.

    Builds a question list from a fixed role-specific bank plus up to three
    questions generated from the candidate's CV summary, then uses an LLM
    hosted on the Hugging Face Hub to score free-text answers and produce a
    final banded evaluation.
    """

    def __init__(self, job_role, cv_summary):
        """
        Args:
            job_role: Role name used to select the base question bank.
            cv_summary: Mapping with a 'text' key holding the CV summary
                text (assumed from usage in _generate_cv_questions —
                confirm against caller).
        """
        self.job_role = job_role
        self.cv_summary = cv_summary
        self.llm = HuggingFaceHub(
            repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
            model_kwargs={"temperature": 0.5, "max_length": 2048}
        )
        self.questions = self._generate_questions()

    def _generate_questions(self):
        """Return the base role questions followed by CV-specific questions."""
        # Load job-specific questions
        base_questions = self._load_base_questions()
        # Generate CV-specific questions
        cv_questions = self._generate_cv_questions()
        return base_questions + cv_questions

    def _load_base_questions(self):
        """Return the canned question bank for ``self.job_role``.

        Unknown roles fall back to the "Software Engineer" bank.
        In a real app, these would be more sophisticated and loaded
        from a database.
        """
        role_questions = {
            "Software Engineer": [
                {"text": "Explain the SOLID principles in object-oriented design.", "type": "technical", "weight": 0.3},
                {"text": "How would you optimize a slow database query?", "type": "technical", "weight": 0.25},
                {"text": "Describe your experience with Agile methodologies.", "type": "behavioral", "weight": 0.2},
                {"text": "How do you handle conflicts in a team setting?", "type": "behavioral", "weight": 0.15},
                {"text": "Where do you see yourself in 5 years?", "type": "general", "weight": 0.1}
            ],
            "Data Scientist": [
                {"text": "Explain the bias-variance tradeoff.", "type": "technical", "weight": 0.3},
                {"text": "How would you handle missing data in a dataset?", "type": "technical", "weight": 0.25},
                {"text": "Describe a time when you had to explain complex technical concepts to non-technical stakeholders.", "type": "behavioral", "weight": 0.2},
                {"text": "How do you stay updated with the latest developments in data science?", "type": "behavioral", "weight": 0.15},
                {"text": "What motivates you to work in data science?", "type": "general", "weight": 0.1}
            ]
        }

        return role_questions.get(self.job_role, role_questions["Software Engineer"])

    def _generate_cv_questions(self):
        """Ask the LLM for up to 3 questions tailored to the CV summary.

        Parses response lines of the form ``<n>. <question>|<type>``.
        Malformed lines are skipped, so fewer than 3 questions may be
        returned.
        """
        prompt = f"""
        Based on the following CV summary for a {self.job_role} position, generate 3 specific interview questions.
        Focus on areas that need clarification or seem particularly relevant to the role.

        CV Summary:
        {self.cv_summary['text']}

        Generate exactly 3 questions in this format:
        1. [question text]|technical
        2. [question text]|behavioral
        3. [question text]|technical

        Make the questions specific to the candidate's experience and the job role.
        """

        response = self.llm(prompt)
        questions = []

        for line in response.split('\n'):
            if line.strip() and '|' in line:
                parts = line.split('|')
                text = parts[0].strip()
                # The prompt requests "1. [question]|type", so strip the
                # leading list numbering the model will echo back.
                text = text.lstrip('0123456789.) ').strip()
                q_type = parts[1].strip().lower()
                if not text:
                    continue  # nothing left after stripping — skip
                questions.append({
                    "text": text,
                    "type": q_type,
                    "weight": 0.15 if q_type == "technical" else 0.1,
                    "cv_based": True
                })

        return questions[:3]  # Ensure we only take 3 questions

    def get_questions(self):
        """Return the full list of interview question dicts."""
        return self.questions

    def evaluate_answer(self, question, answer):
        """Score a single answer with the LLM.

        Args:
            question: Question dict (uses its 'text' key).
            answer: Candidate's free-text answer.

        Returns:
            Dict with 'score' (clamped to 0-10), 'feedback', 'max_score'.
            Falls back to a neutral score of 5 when the LLM response
            cannot be parsed.
        """
        prompt = f"""
        Evaluate the following interview answer for a {self.job_role} position.
        Provide specific feedback and a score from 1-10 based on:
        - Technical accuracy (if technical question)
        - Relevance to the question
        - Clarity of communication
        - Demonstration of skills/experience

        Question: {question['text']}
        Answer: {answer}

        Respond in this exact format:
        Score: [x]/10
        Feedback: [your feedback here]
        """

        response = self.llm(prompt)

        # Parse the response
        score = 5  # default if parsing fails
        feedback = "Evaluation not available"

        if "Score:" in response and "Feedback:" in response:
            try:
                score_part = response.split("Score:")[1].split("/10")[0].strip()
                # The prompt shows "Score: [x]/10", so tolerate a
                # bracketed number like "[7]".
                score = float(score_part.strip("[]"))
            except (ValueError, IndexError):
                # Malformed reply — keep the neutral defaults rather
                # than crashing. (Narrowed from a bare except, which
                # also swallowed KeyboardInterrupt/SystemExit.)
                pass
            else:
                feedback = response.split("Feedback:")[1].strip()

        # Clamp in case the LLM returns an out-of-range value.
        score = max(0.0, min(10.0, score))

        return {
            "score": score,
            "feedback": feedback,
            "max_score": 10
        }

    def final_evaluation(self, answers):
        """Aggregate per-answer evaluations into a weighted score and band.

        Args:
            answers: List of dicts, each with a 'question' dict (optional
                'weight' key, default 0.1) and an 'evaluation' dict with
                a 'score' key.

        Returns:
            Dict with 'score' (0-10, rounded to 1 decimal), 'band', and
            'total_questions'.
        """
        total_score = 0
        max_possible = 0

        # Calculate weighted score
        for answer in answers:
            weight = answer['question'].get('weight', 0.1)
            total_score += answer['evaluation']['score'] * weight
            max_possible += 10 * weight

        # Guard against division by zero when there are no answers
        # (or every weight is zero) — previously raised ZeroDivisionError.
        overall_score = (total_score / max_possible) * 10 if max_possible else 0.0

        # Determine band
        if overall_score >= 9:
            band = "Expert (Band 5)"
        elif overall_score >= 7:
            band = "Proficient (Band 4)"
        elif overall_score >= 5:
            band = "Competent (Band 3)"
        elif overall_score >= 3:
            band = "Limited (Band 2)"
        else:
            band = "Beginner (Band 1)"

        return {
            "score": round(overall_score, 1),
            "band": band,
            "total_questions": len(answers)
        }