from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
import os
import re
class InterviewAgent:
    def __init__(self, job_role, cv_summary):
        self.job_role = job_role
        self.cv_summary = cv_summary
        # Hosted Llama 3 8B Instruct served through the Hugging Face Hub
        self.llm = HuggingFaceHub(
            repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
            model_kwargs={"temperature": 0.5, "max_length": 2048}
        )
        self.questions = self._generate_questions()
    def _generate_questions(self):
        # Load job-specific questions
        base_questions = self._load_base_questions()
        # Generate CV-specific questions
        cv_questions = self._generate_cv_questions()
        return base_questions + cv_questions
    def _load_base_questions(self):
        # In a real app, these would be more sophisticated and loaded from a database
        role_questions = {
            "Software Engineer": [
                {"text": "Explain the SOLID principles in object-oriented design.", "type": "technical", "weight": 0.3},
                {"text": "How would you optimize a slow database query?", "type": "technical", "weight": 0.25},
                {"text": "Describe your experience with Agile methodologies.", "type": "behavioral", "weight": 0.2},
                {"text": "How do you handle conflicts in a team setting?", "type": "behavioral", "weight": 0.15},
                {"text": "Where do you see yourself in 5 years?", "type": "general", "weight": 0.1}
            ],
            "Data Scientist": [
                {"text": "Explain the bias-variance tradeoff.", "type": "technical", "weight": 0.3},
                {"text": "How would you handle missing data in a dataset?", "type": "technical", "weight": 0.25},
                {"text": "Describe a time when you had to explain complex technical concepts to non-technical stakeholders.", "type": "behavioral", "weight": 0.2},
                {"text": "How do you stay updated with the latest developments in data science?", "type": "behavioral", "weight": 0.15},
                {"text": "What motivates you to work in data science?", "type": "general", "weight": 0.1}
            ]
        }
        # Fall back to the Software Engineer set for unrecognized roles
        return role_questions.get(self.job_role, role_questions["Software Engineer"])
    def _generate_cv_questions(self):
        # Generate questions based on CV content
        prompt = f"""
        Based on the following CV summary for a {self.job_role} position, generate 3 specific interview questions.
        Focus on areas that need clarification or seem particularly relevant to the role.

        CV Summary:
        {self.cv_summary['text']}

        Generate exactly 3 questions in this format:
        1. [question text]|technical
        2. [question text]|behavioral
        3. [question text]|technical

        Make the questions specific to the candidate's experience and the job role.
        """
        response = self.llm(prompt)
        questions = []
        for line in response.split('\n'):
            if line.strip() and '|' in line:
                # Strip the leading "1." style numbering from the question text
                text = re.sub(r'^\s*\d+[\.\)]\s*', '', line.split('|')[0]).strip()
                q_type = line.split('|')[1].strip().lower()
                questions.append({
                    "text": text,
                    "type": q_type,
                    "weight": 0.15 if q_type == "technical" else 0.1,
                    "cv_based": True
                })
        return questions[:3]  # Ensure we only take 3 questions
    def get_questions(self):
        return self.questions
    def evaluate_answer(self, question, answer):
        prompt = f"""
        Evaluate the following interview answer for a {self.job_role} position.
        Provide specific feedback and a score from 1-10 based on:
        - Technical accuracy (if technical question)
        - Relevance to the question
        - Clarity of communication
        - Demonstration of skills/experience

        Question: {question['text']}
        Answer: {answer}

        Respond in this exact format:
        Score: [x]/10
        Feedback: [your feedback here]
        """
        response = self.llm(prompt)

        # Parse the response, falling back to defaults if it is malformed
        score = 5  # default if parsing fails
        feedback = "Evaluation not available"
        if "Score:" in response and "Feedback:" in response:
            try:
                score_part = response.split("Score:")[1].split("/10")[0].strip()
                score = float(score_part)
                feedback = response.split("Feedback:")[1].strip()
            except (ValueError, IndexError):
                pass
        return {
            "score": score,
            "feedback": feedback,
            "max_score": 10
        }
    def final_evaluation(self, answers):
        total_score = 0
        max_possible = 0
        # Calculate weighted score
        for answer in answers:
            weight = answer['question'].get('weight', 0.1)
            total_score += answer['evaluation']['score'] * weight
            max_possible += 10 * weight
        # Guard against division by zero when no answers were recorded
        overall_score = (total_score / max_possible) * 10 if max_possible else 0
        # Determine band
        if overall_score >= 9:
            band = "Expert (Band 5)"
        elif overall_score >= 7:
            band = "Proficient (Band 4)"
        elif overall_score >= 5:
            band = "Competent (Band 3)"
        elif overall_score >= 3:
            band = "Limited (Band 2)"
        else:
            band = "Beginner (Band 1)"
        return {
            "score": round(overall_score, 1),
            "band": band,
            "total_questions": len(answers)
        }
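

# --- Usage sketch (illustrative, not part of the original app) ---
# A minimal example of how InterviewAgent might be driven end to end. The
# job role, CV text, and the interactive answer loop are assumptions; the
# cv_summary dict with a 'text' key mirrors what _generate_cv_questions reads.
if __name__ == "__main__":
    agent = InterviewAgent(
        job_role="Software Engineer",
        cv_summary={"text": "5 years of Python backend development, REST APIs, PostgreSQL."},
    )
    answers = []
    for question in agent.get_questions():
        # Collect an answer for each question, then score it with the LLM
        answer = input(f"{question['text']}\n> ")
        evaluation = agent.evaluate_answer(question, answer)
        answers.append({"question": question, "evaluation": evaluation})
    # Aggregate the weighted scores into an overall band
    print(agent.final_evaluation(answers))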