import os
import re

import PyPDF2
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sentence_transformers import SentenceTransformer, util
from groq import Groq
import gradio as gr

# Read the Groq API key from the environment; set GROQ_API_KEY as a secret in
# your deployment environment rather than hard-coding it in source.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# --- PDF/Text Extraction Functions --- #
def extract_text_from_file(file_path):
    """Extracts text from PDF or TXT files based on the file extension."""
    if file_path.endswith('.pdf'):
        return extract_text_from_pdf(file_path)
    elif file_path.endswith('.txt'):
        return extract_text_from_txt(file_path)
    else:
        raise ValueError("Unsupported file type. Only PDF and TXT files are accepted.")

def extract_text_from_pdf(pdf_file_path):
    """Extracts text from a PDF file."""
    with open(pdf_file_path, 'rb') as pdf_file:
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        text = ''.join(page.extract_text() for page in pdf_reader.pages if page.extract_text())
    return text

def extract_text_from_txt(txt_file_path):
    """Extracts text from a .txt file."""
    with open(txt_file_path, 'r', encoding='utf-8') as txt_file:
        return txt_file.read()

# --- Skill Extraction with Llama Model --- #
def extract_skills_llama(text):
    """Extracts skills from the text using the Llama model via the Groq API."""
    try:
        response = client.chat.completions.create(
            messages=[{
                "role": "user",
                "content": (
                    "Extract the skills from the following text and return them as a "
                    f"single comma-separated list with no other commentary: {text}"
                ),
            }],
            model="llama3-70b-8192",
        )
        # The prompt asks for a comma-separated list, so split on commas and strip whitespace
        skills = [skill.strip() for skill in response.choices[0].message.content.split(',') if skill.strip()]
        return skills
    except Exception as e:
        raise RuntimeError(f"Error during skill extraction: {e}")
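
# Illustrative (hypothetical) result, assuming the model follows the prompt and
# returns a plain comma-separated list:
#   extract_skills_llama("Built REST APIs in Python and Django, deployed on AWS.")
#   -> ['Python', 'Django', 'REST APIs', 'AWS']
# The exact skills returned depend on the model's response and can vary between calls.
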
# --- Job Description Processing Function --- #
def process_job_description(text):
    """Extracts skills or relevant keywords from the job description."""
    return extract_skills_llama(text)

# --- Qualification and Experience Extraction --- #
def extract_qualifications(text):
    """Extracts qualifications from text (e.g., degrees, certifications)."""
    qualifications = re.findall(r'(bachelor|master|phd|certified|degree)', text, re.IGNORECASE)
    # Lower-case the matches so resume and job description qualifications compare consistently
    return [q.lower() for q in qualifications] if qualifications else ['No specific qualifications found']
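
# Illustrative example of the keyword matching above:
#   extract_qualifications("Bachelor of Science, AWS Certified Developer")
#   -> ['bachelor', 'certified']
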
def extract_experience(text):
    """Extracts years of experience and recognized job titles from the text."""
    experience_years = re.findall(r'(\d+)\s*(years|year) of experience', text, re.IGNORECASE)
    job_titles = re.findall(r'\b(software engineer|developer|manager|analyst)\b', text, re.IGNORECASE)
    experience_years = [int(year[0]) for year in experience_years]
    return experience_years, job_titles
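
# Illustrative example of what the patterns above capture:
#   extract_experience("Software Engineer with 5 years of experience")
#   -> ([5], ['Software Engineer'])
# Phrases that do not literally contain "<N> years of experience", and job titles
# outside the short list above, are not captured.
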
# --- Sentiment Analysis --- #
def analyze_sentiment(text):
    """Analyzes the sentiment of the text with a three-class (negative/neutral/positive) model."""
    # Any three-class sentiment model from the Hugging Face Hub can be substituted here
    model_name = "cardiffnlp/twitter-roberta-base-sentiment"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # The model's label order is negative, neutral, positive
    predicted_sentiment = torch.argmax(outputs.logits).item()
    return ["Negative", "Neutral", "Positive"][predicted_sentiment]

# --- Semantic Similarity Calculation --- #
def calculate_semantic_similarity(text1, text2):
    """Calculates semantic similarity with a sentence-transformer model and returns the score as a percentage."""
    model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    embeddings1 = model.encode(text1, convert_to_tensor=True)
    embeddings2 = model.encode(text2, convert_to_tensor=True)
    similarity_score = util.pytorch_cos_sim(embeddings1, embeddings2).item()
    # Convert the cosine similarity score to a percentage
    similarity_percentage = similarity_score * 100
    return similarity_percentage
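
# Illustrative usage (scores are approximate and depend on the model):
#   calculate_semantic_similarity("Python backend developer", "Backend engineer with Python")
# would return a high percentage, while unrelated texts score much lower. Note that the
# analysis below compares skills and qualifications with set intersections rather than
# with this helper.
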
# --- Communication Generation --- #
def communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, candidate_experience):
    """Generates a detailed communication response based on similarity scores and additional criteria."""
    # Assess candidate fit based on the similarity scores (all expressed as percentages)
    if skills_similarity >= 80 and qualifications_similarity >= 80 and experience_similarity >= 80:
        fit_status = "strong fit"
    elif skills_similarity >= 50:
        fit_status = "moderate fit"
    else:
        fit_status = "weak fit"
    # Build a recommendation that matches the assessed fit
    if fit_status == "strong fit":
        recommendation = "We recommend moving forward with this candidate, as they demonstrate a high level of alignment with the role requirements."
    elif fit_status == "moderate fit":
        recommendation = "This candidate shows potential; however, further assessment or interviews are recommended to clarify their fit for the role."
    else:
        recommendation = "We advise against moving forward with this candidate, as they do not meet the key technical requirements for the position."
    message = (
        f"After a detailed analysis of the candidate's resume, we found the following insights:\n\n"
        f"- **Skills Match**: {skills_similarity:.2f}% (based on required technologies: {', '.join(job_description_skills)})\n"
        f"- **Experience Match**: {experience_similarity:.2f}% (relevant experience: {candidate_experience} years)\n"
        f"- **Qualifications Match**: {qualifications_similarity:.2f}%\n\n"
        f"The overall assessment indicates that the candidate is a {fit_status} for the role. "
        f"Their skills in {', '.join(resume_skills)} align with the job's requirements of {', '.join(job_description_skills)}. "
        f"Given their professional experience, particularly with technologies such as {', '.join(resume_skills)}, they could contribute effectively to the team.\n\n"
        f"**Recommendation**: {recommendation}\n"
    )
    return message
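
# Illustrative (hypothetical) call, with all similarity arguments given as percentages:
#   communication_generator(['Python', 'SQL'], ['Python', 'AWS'], 50.0, 100.0, 100.0, 4)
# returns a Markdown-formatted summary ending in a "moderate fit" recommendation.
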
# --- Resume Analysis Function --- #
def analyze_resume(resume_file, job_description_file):
    """Runs the full analysis pipeline and returns the formatted report sections."""
    # Load and preprocess the resume and the job description
    resume_text = extract_text_from_file(resume_file)
    job_description_text = extract_text_from_file(job_description_file)
    # Extract skills, qualifications, and experience from the resume
    resume_skills = extract_skills_llama(resume_text)
    resume_qualifications = extract_qualifications(resume_text)
    resume_experience, _ = extract_experience(resume_text)
    total_experience = sum(resume_experience)  # Total years of experience found in the resume
    # Extract required skills, qualifications, and experience from the job description
    job_description_skills = process_job_description(job_description_text)
    job_description_qualifications = extract_qualifications(job_description_text)
    job_description_experience, _ = extract_experience(job_description_text)
    required_experience = sum(job_description_experience)  # Total years of experience required
    # Calculate similarity scores (all expressed as percentages)
    skills_similarity = len(set(resume_skills).intersection(set(job_description_skills))) / len(job_description_skills) * 100 if job_description_skills else 0
    qualifications_similarity = len(set(resume_qualifications).intersection(set(job_description_qualifications))) / len(job_description_qualifications) * 100 if job_description_qualifications else 0
    experience_similarity = 100.0 if total_experience >= required_experience else 0.0
    # Sentiment analysis of the resume
    sentiment_analysis_result = analyze_sentiment(resume_text)
    # Fit assessment logic
    fit_score = 0
    if total_experience >= required_experience:
        fit_score += 1
    if skills_similarity > 50:  # Threshold for a skills match
        fit_score += 1
    if qualifications_similarity > 50:  # Threshold for a qualifications match
        fit_score += 1
    # Determine fit
    if fit_score == 3:
        fit_assessment = "Strong fit"
    elif fit_score == 2:
        fit_assessment = "Moderate fit"
    else:
        fit_assessment = "Not a fit"
    # Prepare output messages for tab display
    summary_message = (
        f"### Summary of Analysis\n"
        f"- **Skills Similarity**: {skills_similarity:.2f}%\n"
        f"- **Qualifications Similarity**: {qualifications_similarity:.2f}%\n"
        f"- **Experience Similarity**: {experience_similarity:.2f}%\n"
        f"- **Candidate Experience**: {total_experience} years\n"
        f"- **Fit Assessment**: {fit_assessment}\n"
        f"- **Sentiment Analysis**: {sentiment_analysis_result}\n"
    )
    skills_message = (
        "### Skills Overview\n"
        "- **Resume Skills:**\n" + "\n".join(f"  - {skill}" for skill in resume_skills) + "\n"
        "- **Job Description Skills:**\n" + "\n".join(f"  - {skill}" for skill in job_description_skills) + "\n"
    )
    qualifications_message = (
        "### Qualifications Overview\n"
        "- **Resume Qualifications:** " + ", ".join(resume_qualifications) + "\n"
        "- **Job Description Qualifications:** " + ", ".join(job_description_qualifications) + "\n"
    )
    experience_message = (
        f"### Experience Overview\n"
        f"- **Total Experience:** {total_experience} years\n"
        f"- **Required Experience:** {required_experience} years\n"
    )
    # Generate the communication message based on the analysis
    communication = communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, total_experience)
    return summary_message, skills_message, qualifications_message, experience_message, communication

# --- Gradio Interface --- #
def run_gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("## Resume and Job Description Analyzer")
        resume_file = gr.File(label="Upload Resume", type="filepath")
        job_description_file = gr.File(label="Upload Job Description", type="filepath")
        # Create the output boxes inside their tabs so each one renders in the right place
        with gr.Tab("Analysis Summary"):
            gr.Markdown("### Summary of Analysis")
            summary_output = gr.Textbox(label="Summary of Analysis", interactive=False, lines=10)
        with gr.Tab("Skills Overview"):
            gr.Markdown("### Skills Overview")
            skills_output = gr.Textbox(label="Skills Overview", interactive=False, lines=10)
        with gr.Tab("Qualifications Overview"):
            gr.Markdown("### Qualifications Overview")
            qualifications_output = gr.Textbox(label="Qualifications Overview", interactive=False, lines=10)
        with gr.Tab("Experience Overview"):
            gr.Markdown("### Experience Overview")
            experience_output = gr.Textbox(label="Experience Overview", interactive=False, lines=10)
        analyze_button = gr.Button("Analyze")

        def analyze(resume, job_desc):
            if resume and job_desc:
                # analyze_resume also returns a generated communication message,
                # which is not displayed in this layout.
                summary, skills, qualifications, experience, _communication = analyze_resume(resume, job_desc)
                return summary, skills, qualifications, experience
            # One message per output box when either file is missing
            message = "Please upload both files."
            return message, message, message, message

        analyze_button.click(analyze, inputs=[resume_file, job_description_file], outputs=[summary_output, skills_output, qualifications_output, experience_output])
    demo.launch()


if __name__ == "__main__":
    run_gradio_interface()