from huggingface_hub import InferenceClient
import random
from flask import Flask, request, jsonify, redirect, url_for
from flask_cors import CORS
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Hugging Face Inference client for the Mixtral instruct model
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

# Neon Postgres connection (credentials are hardcoded in the original source)
connection_string = "postgresql://data_owner:PFAnX9oJp4wV@ep-green-heart-a78sxj65.ap-southeast-2.aws.neon.tech/figurecircle?sslmode=require"
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)

app = Flask(__name__)
CORS(app)
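# NOTE: get_mentor() below queries a `Mentor` model that is neither defined nor
# imported in this snippet. The following minimal SQLAlchemy sketch is included
# only so the file is self-contained; the table name and column types are
# assumptions inferred from the fields accessed in get_mentor(), not the
# original schema.
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Mentor(Base):
    __tablename__ = "mentor"  # assumed table name

    id = Column(Integer, primary_key=True)
    mentor_name = Column(String)
    skills = Column(String)
    qualification = Column(String)
    experience = Column(String)
    verified = Column(Boolean, default=False)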
@app.route('/')  # decorator missing from the extracted source; root path assumed
def home():
    return jsonify({"message": "Welcome to the Recommendation API!"})
def format_prompt(message):
    # Wrap the message in the Mixtral instruction format, using a fixed
    # placeholder user/bot turn as the conversation history
    user_prompt = "UserPrompt"
    bot_response = "BotResponse"
    return f"<s>[INST] {user_prompt} [/INST] {bot_response}</s> [INST] {message} [/INST]"
@app.route('/ai_mentor', methods=['POST'])  # route path assumed; decorator missing from the extracted source
def ai_mentor():
    data = request.get_json()
    message = data.get('message')
    if not message:
        return jsonify({"message": "Missing message"}), 400

    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Define the prompt for the conversation
    prompt = f"""prompt:
    Act as a mentor.
    User: {message}"""
    formatted_prompt = format_prompt(prompt)

    try:
        # Generate a response from the language model
        response = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
        return jsonify({"response": response}), 200
    except Exception as e:
        return jsonify({"message": f"Failed to process request: {str(e)}"}), 500
@app.route('/get_course', methods=['POST'])  # route path assumed; decorator missing from the extracted source
def get_course():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    # user_degree = content.get('degree')  # Uncomment this line
    user_stream = content.get('stream')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    prompt = f"""prompt:
    Act as a course recommendation engine for a student. Below are the current details.
    Stream: {user_stream}
    Based on the current details, recommend courses for a higher degree.
    Note: the output should be a list in the format below:
    [course1, course2, course3, ...]
    Return only the answer, not the prompt or anything unnecessary, and don't add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": stream})
@app.route('/get_mentor', methods=['POST'])  # route path assumed; decorator missing from the extracted source
def get_mentor():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    user_stream = content.get('stream')

    session = Session()
    # Query verified mentors
    verified_mentors = session.query(Mentor).filter_by(verified=True).all()
    mentor_list = [{"id": mentor.id, "mentor_name": mentor.mentor_name, "skills": mentor.skills,
                    "qualification": mentor.qualification, "experience": mentor.experience,
                    "verified": mentor.verified} for mentor in verified_mentors]
    session.close()
    mentors_data = mentor_list

    # Clamp the sampling parameters
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    prompt = f"""prompt:
    Act as a mentor recommendation engine for a student based on the details below; the list of mentors with their experience is attached.
    Stream: {user_stream}
    Mentor list: {mentors_data}
    Based on the above details, recommend the mentors that relate to them.
    Note: the output should be a list in the format below:
    [mentor1, mentor2, mentor3, ...]
    """
    formatted_prompt = format_prompt(prompt)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": stream})
@app.route('/get_streams', methods=['GET'])  # route path and method assumed; decorator missing from the extracted source
def get_streams():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    prompt = f"""prompt:
    Act as a recommendation engine.
    List 40+ streams/branches, such as computer science, chemical engineering, aerospace, etc.
    Note: the output should be a list in the format below:
    [branch1, branch2, branch3, ...]
    Return only the answer, not the prompt or anything unnecessary, and don't add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": stream})
@app.route('/get_education_profiles', methods=['GET'])  # route path and method assumed; decorator missing from the extracted source
def get_education_profiles():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    sectors = ["engineering", "medical", "arts", "commerce", "science", "management"]  # Example sectors

    prompt = f"""prompt:
    Act as a recommendation engine.
    List all education-related profiles in sectors like {', '.join(sectors)}.
    Note: the output should be a list in the format below:
    [profile1, profile2, profile3, ...]
    Return only the answer, not the prompt or anything unnecessary, and don't add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    education_profiles = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": education_profiles})
@app.route('/get_certificate', methods=['POST'])  # route path assumed; decorator missing from the extracted source
def get_certificate():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    # user_degree = content.get('degree')  # Uncomment this line
    user_stream = content.get('stream')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    prompt = f"""prompt:
    Act as a certification recommendation engine for a student. Below are the current details.
    Stream: {user_stream}
    Based on the current details, recommend certifications.
    Note: the output should be a list in the format below:
    [certification1, certification2, certification3, ...]
    Return only the answer, not the prompt or anything unnecessary, and don't add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": stream})
@app.route('/get_competition', methods=['POST'])  # route path assumed; decorator missing from the extracted source
def get_competition():
    temperature = 0.9
    max_new_tokens = 256
    top_p = 0.95
    repetition_penalty = 1.0

    content = request.json
    # user_degree = content.get('degree')  # Uncomment this line
    user_stream = content.get('stream')

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    prompt = f"""prompt:
    Act as a competition recommendation engine for a student. Below are the current details.
    Stream: {user_stream}
    Based on the current details, recommend competitions.
    Note: the output should be a list in the format below:
    [competition1, competition2, competition3, ...]
    Return only the answer, not the prompt or anything unnecessary, and don't add any special characters or punctuation marks.
    """
    formatted_prompt = format_prompt(prompt)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False)
    return jsonify({"ans": stream})
if __name__ == '__main__':
    app.run(debug=True)
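# Example request against the API, assuming the /ai_mentor route path used above
# and a local debug server on Flask's default port 5000 (both are assumptions,
# not taken from the original source):
#
#   curl -X POST http://127.0.0.1:5000/ai_mentor \
#        -H "Content-Type: application/json" \
#        -d '{"message": "How do I prepare for a data science career?"}'
#
# A successful call returns {"response": "<generated text>"} with HTTP 200; a
# missing "message" field returns {"message": "Missing message"} with HTTP 400.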