diff --git "a/session_page.py" "b/session_page.py" new file mode 100644--- /dev/null +++ "b/session_page.py" @@ -0,0 +1,2892 @@ +from collections import defaultdict +import json +import random +import requests +import streamlit as st +from datetime import datetime, timedelta +from youtube_transcript_api import YouTubeTranscriptApi +from utils.helpers import display_progress_bar, create_notification, format_datetime +from file_upload_vectorize import upload_resource, extract_text_from_file, create_vector_store, resources_collection, model, assignment_submit +from db import courses_collection2, chat_history_collection, students_collection, faculty_collection, vectors_collection +from chatbot import give_chat_response +from bson import ObjectId +from live_polls import LivePollFeature +import pandas as pd +import plotly.express as px +from dotenv import load_dotenv +import os +from pymongo import MongoClient +from gen_mcqs import generate_mcqs, save_quiz, quizzes_collection, get_student_quiz_score, submit_quiz_answers +from create_course import courses_collection +# from pre_class_analytics import NovaScholarAnalytics +from pre_class_analytics2 import NovaScholarAnalytics +import openai +from openai import OpenAI +import google.generativeai as genai +from google.generativeai import caching +from goals2 import GoalAnalyzer +from openai import OpenAI +import asyncio +import numpy as np +import re +from analytics import derive_analytics, create_embeddings, cosine_similarity +from bs4 import BeautifulSoup +import streamlit.components.v1 as components +from live_chat_feature import display_live_chat_interface +from code_playground import display_code_playground +from urllib.parse import urlparse, parse_qs +from bs4 import BeautifulSoup +from rubrics import display_rubrics_tab +from subjective_test_evaluation import evaluate_subjective_answers, display_evaluation_to_faculty + +# Load environment variables +load_dotenv() +MONGO_URI = os.getenv('MONGO_URI') +PERPLEXITY_API_KEY = os.getenv('PERPLEXITY_KEY') +OPENAI_API_KEY = os.getenv('OPENAI_KEY') +client = MongoClient(MONGO_URI) +db = client["novascholar_db"] +polls_collection = db["polls"] +subjective_tests_collection = db["subjective_tests"] +subjective_test_evaluation_collection = db["subjective_test_evaluation"] +assignment_evaluation_collection = db["assignment_evaluation"] +subjective_tests_collection = db["subjective_tests"] +synoptic_store_collection = db["synoptic_store"] +assignments_collection = db["assignments"] + +# for implementing Context Caching: +# PROJECT_ID = "novascholar-446709" +# vertexai.init(project=PROJECT_ID, location="us-west4") + +def get_current_user(): + if 'current_user' not in st.session_state: + return None + return students_collection.find_one({"_id": st.session_state.user_id}) + +# def display_preclass_content(session, student_id, course_id): + """Display pre-class materials for a session""" + + # Initialize 'messages' in session_state if it doesn't exist + if 'messages' not in st.session_state: + st.session_state.messages = [] + + # Display pre-class materials + materials = list(resources_collection.find({"course_id": course_id, "session_id": session['session_id']})) + st.subheader("Pre-class Materials") + + if materials: + for material in materials: + with st.expander(f"{material['file_name']} ({material['material_type'].upper()})"): + file_type = material.get('file_type', 'unknown') + if file_type == 'application/pdf': + st.markdown(f"đ [Open PDF Document]({material['file_name']})") + if st.button("View PDF", 
key=f"view_pdf_{material['file_name']}"): + st.text_area("PDF Content", material['text_content'], height=300) + if st.button("Download PDF", key=f"download_pdf_{material['file_name']}"): + st.download_button( + label="Download PDF", + data=material['file_content'], + file_name=material['file_name'], + mime='application/pdf' + ) + if st.button("Mark PDF as Read", key=f"pdf_{material['file_name']}"): + create_notification("PDF marked as read!", "success") + else: + st.info("No pre-class materials uploaded by the faculty.") + st.subheader("Upload Pre-class Material") + + # File upload section for students + uploaded_file = st.file_uploader("Upload Material", type=['txt', 'pdf', 'docx']) + if uploaded_file is not None: + with st.spinner("Processing document..."): + file_name = uploaded_file.name + file_content = extract_text_from_file(uploaded_file) + if file_content: + material_type = st.selectbox("Select Material Type", ["pdf", "docx", "txt"]) + if st.button("Upload Material"): + upload_resource(course_id, session['session_id'], file_name, uploaded_file, material_type) + + # Search for the newly uploaded resource's _id in resources_collection + resource_id = resources_collection.find_one({"file_name": file_name})["_id"] + create_vector_store(file_content, resource_id) + st.success("Material uploaded successfully!") + + st.subheader("Learn the Topic Using Chatbot") + st.write(f"**Session Title:** {session['title']}") + st.write(f"**Description:** {session.get('description', 'No description available.')}") + + # Chatbot interface + if prompt := st.chat_input("Ask a question about the session topic"): + if len(st.session_state.messages) >= 20: + st.warning("Message limit (20) reached for this session.") + return + + st.session_state.messages.append({"role": "user", "content": prompt}) + + # Display User Message + with st.chat_message("user"): + st.markdown(prompt) + + # Get response from chatbot + context = "" + for material in materials: + if 'text_content' in material: + context += material['text_content'] + "\n" + + response = give_chat_response(student_id, session['session_id'], prompt, session['title'], session.get('description', ''), context) + st.session_state.messages.append({"role": "assistant", "content": response}) + + # Display Assistant Response + with st.chat_message("assistant"): + st.markdown(response) + + # st.subheader("Your Chat History") + # for message in st.session_state.messages: + # content = message.get("content", "") # Default to an empty string if "content" is not present + # role = message.get("role", "user") # Default to "user" if "role" is not present + # with st.chat_message(role): + # st.markdown(content) + # user = get_current_user() + +def display_preclass_content(session, student_id, course_id): + """Display pre-class materials for a session including external resources""" + st.subheader("Pre-class Materials") + print("Session ID is: ", session['session_id']) + + # Display uploaded materials + materials = resources_collection.find({"session_id": session['session_id']}) + + for material in materials: + file_type = material.get('file_type', 'unknown') + + # Handle external resources + if file_type == 'external' or file_type == 'video': + with st.expander(f"đ {material['file_name']}"): + st.markdown(f"Source: [{material['source_url']}]({material['source_url']})") + + if material['material_type'].lower() == 'video': + # Embed YouTube video if it's a YouTube URL + if 'youtube.com' in material['source_url'] or 'youtu.be' in material['source_url']: + video_id = 
extract_youtube_id(material['source_url']) + if video_id: + st.video(f"https://youtube.com/watch?v={video_id}") + + if st.button("View Content", key=f"view_external_{material['_id']}"): + st.text_area("Extracted Content", material['text_content'], height=300) + + if st.button("Mark as Read", key=f"external_{material['_id']}"): + create_notification(f"{material['material_type']} content marked as read!", "success") + + # Handle traditional file types + else: + with st.expander(f"{material['file_name']} ({material['material_type'].upper()})"): + if file_type == 'application/pdf': + st.markdown(f"đ [Open PDF Document]({material['file_name']})") + if st.button("View PDF", key=f"view_pdf_{material['_id']}"): + st.text_area("PDF Content", material['text_content'], height=300) + if st.button("Download PDF", key=f"download_pdf_{material['_id']}"): + st.download_button( + label="Download PDF", + data=material['file_content'], + file_name=material['file_name'], + mime='application/pdf' + ) + if st.button("Mark PDF as Read", key=f"pdf_{material['_id']}"): + create_notification("PDF marked as read!", "success") + + elif file_type == 'text/plain': + st.markdown(f"đ [Open Text Document]({material['file_name']})") + if st.button("View Text", key=f"view_text_{material['_id']}"): + st.text_area("Text Content", material['text_content'], height=300) + if st.button("Download Text", key=f"download_text_{material['_id']}"): + st.download_button( + label="Download Text", + data=material['file_content'], + file_name=material['file_name'], + mime='text/plain' + ) + if st.button("Mark Text as Read", key=f"text_{material['_id']}"): + create_notification("Text marked as read!", "success") + + elif file_type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': + st.markdown(f"đ [Open Word Document]({material['file_name']})") + if st.button("View Word", key=f"view_word_{material['_id']}"): + st.text_area("Word Content", material['text_content'], height=300) + if st.button("Download Word", key=f"download_word_{material['_id']}"): + st.download_button( + label="Download Word", + data=material['file_content'], + file_name=material['file_name'], + mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document' + ) + if st.button("Mark Word as Read", key=f"word_{material['_id']}"): + create_notification("Word document marked as read!", "success") + + elif file_type == 'application/vnd.openxmlformats-officedocument.presentationml.presentation': + st.markdown(f"đ [Open PowerPoint Presentation]({material['file_name']})") + if st.button("View PowerPoint", key=f"view_pptx_{material['_id']}"): + st.text_area("PowerPoint Content", material['text_content'], height=300) + if st.button("Download PowerPoint", key=f"download_pptx_{material['_id']}"): + st.download_button( + label="Download PowerPoint", + data=material['file_content'], + file_name=material['file_name'], + mime='application/vnd.openxmlformats-officedocument.presentationml.presentation' + ) + if st.button("Mark PowerPoint as Read", key=f"pptx_{material['_id']}"): + create_notification("PowerPoint presentation marked as read!", "success") + + + # Initialize 'messages' in session_state if it doesn't exist + if 'messages' not in st.session_state: + st.session_state.messages = [] + + # Chat input + # Add a check, if materials are available, only then show the chat input + if(st.session_state.user_type == "student"): + if materials: + if prompt := st.chat_input("Ask a question about Pre-class Materials"): + # if 
len(st.session_state.messages) >= 20: + # st.warning("Message limit (20) reached for this session.") + # return + + st.session_state.messages.append({"role": "user", "content": prompt}) + + # Display User Message + with st.chat_message("user"): + st.markdown(prompt) + + # Get document context + context = "" + print("Session ID is: ", session['session_id']) + materials = resources_collection.find({"session_id": session['session_id']}) + print(materials) + context = "" + vector_data = None + + # for material in materials: + # print(material) + context = "" + for material in materials: + resource_id = material['_id'] + print("Supposed Resource ID is: ", resource_id) + vector_data = vectors_collection.find_one({"resource_id": resource_id}) + # print(vector_data) + if vector_data and 'text' in vector_data: + context += vector_data['text'] + "\n" + + if not vector_data: + st.error("No Pre-class materials found for this session.") + return + + try: + # Generate response using Gemini + # context_prompt = f""" + # Based on the following context, answer the user's question: + + # Context: + # {context} + + # Question: {prompt} + + # Please provide a clear and concise answer based only on the information provided in the context. + # """ + # context_prompt = f""" + # You are a highly intelligent and resourceful assistant capable of synthesizing information from the provided context. + + # Context: + # {context} + + # Instructions: + # 1. Base your answers primarily on the given context. + # 2. If the answer to the user's question is not explicitly in the context but can be inferred or synthesized from the information provided, do so thoughtfully. + # 3. Only use external knowledge or web assistance when: + # - The context lacks sufficient information, and + # - The question requires knowledge beyond what can be reasonably inferred from the context. + # 4. Clearly state if you are relying on web assistance for any part of your answer. + # 5. Do not respond with a negative. If the answer is not in the context, provide a thoughtful response based on the information available on the web about it. + + # Question: {prompt} + + # Please provide a clear and comprehensive answer based on the above instructions. + # """ + context_prompt = f""" + You are a highly intelligent and resourceful assistant capable of synthesizing information from the provided context and external sources. + + Context: + {context} + + Instructions: + 1. Base your answers on the provided context wherever possible. + 2. If the answer to the user's question is not explicitly in the context: + - Use external knowledge or web assistance to provide a clear and accurate response. + 3. Do not respond negatively. If the answer is not in the context, use web assistance or your knowledge to generate a thoughtful response. + 4. Clearly state if part of your response relies on web assistance. + + Question: {prompt} + + Please provide a clear and comprehensive answer based on the above instructions. 
+ """ + + response = model.generate_content(context_prompt) + if not response or not response.text: + st.error("No response received from the model") + return + + assistant_response = response.text + # Display Assistant Response + with st.chat_message("assistant"): + st.markdown(assistant_response) + + # Build the message + new_message = { + "prompt": prompt, + "response": assistant_response, + "timestamp": datetime.utcnow() + } + st.session_state.messages.append(new_message) + # Update database + try: + chat_history_collection.update_one( + { + "user_id": student_id, + "session_id": session['session_id'] + }, + { + "$push": {"messages": new_message}, + "$setOnInsert": { + "user_id": student_id, + "session_id": session['session_id'], + "timestamp": datetime.utcnow() + } + }, + upsert=True + ) + except Exception as db_error: + st.error(f"Error saving chat history: {str(db_error)}") + except Exception as e: + st.error(f"Error generating response: {str(e)}") + + else: + st.subheader("Upload Pre-class Material") + # File upload section for students + uploaded_file = st.file_uploader("Upload Material", type=['txt', 'pdf', 'docx']) + if uploaded_file is not None: + with st.spinner("Processing document..."): + file_name = uploaded_file.name + file_content = extract_text_from_file(uploaded_file) + if file_content: + material_type = st.selectbox("Select Material Type", ["pdf", "docx", "txt"]) + if st.button("Upload Material"): + upload_resource(course_id, session['session_id'], file_name, uploaded_file, material_type) + # print("Resource ID is: ", resource_id) + # Search for the newly uploaded resource's _id in resources_collection + # resource_id = resources_collection.find_one({"file_name": file_name})["_id"] + st.success("Material uploaded successfully!") + # st.experimental_rerun() + # st.subheader("Your Chat History") + if st.button("View Chat History"): + # Initialize chat messages from database + if 'messages' not in st.session_state or not st.session_state.messages: + existing_chat = chat_history_collection.find_one({ + "user_id": student_id, + "session_id": session['session_id'] + }) + if existing_chat and 'messages' in existing_chat: + st.session_state.messages = existing_chat['messages'] + else: + st.session_state.messages = [] + + # Display existing chat history + try: + for message in st.session_state.messages: + if 'prompt' in message and 'response' in message: + with st.chat_message("user"): + st.markdown(message["prompt"]) + with st.chat_message("assistant"): + st.markdown(message["response"]) + except Exception as e: + st.error(f"Error displaying chat history: {str(e)}") + st.session_state.messages = [] + + if st.session_state.user_type == 'student': + st.subheader("Create a Practice Quiz") + questions = [] + quiz_id = "" + with st.form("create_quiz_form"): + num_questions = st.number_input("Number of Questions", min_value=1, max_value=20, value=2) + submit_quiz = st.form_submit_button("Generate Quiz") + if submit_quiz: + # Get pre-class materials from resources_collection + materials = resources_collection.find({"session_id": session['session_id']}) + context = "" + for material in materials: + if 'text_content' in material: + context += material['text_content'] + "\n" + + if not context: + st.error("No pre-class materials found for this session.") + return + + # Generate MCQs from context + questions = generate_mcqs(context, num_questions, session['title'], session.get('description', '')) + if questions: + quiz_id = save_quiz(course_id, session['session_id'], "Practice Quiz", 
questions, student_id) + if quiz_id: + st.success("Quiz saved successfully!") + st.session_state.show_quizzes = True + else: + st.error("Error saving quiz.") + else: + st.error("Error generating questions.") + + # if st.button("Attempt Practice Quizzes "): + # quizzes = list(quizzes_collection.find({"course_id": course_id, "session_id": session['session_id'], "user_id": student_id})) + + + if getattr(st.session_state, 'show_quizzes', False): + # quiz = quizzes_collection.find_one({"course_id": course_id, "session_id": session['session_id'], "user_id": student_id}) + quiz = quizzes_collection.find_one( + {"course_id": course_id, "session_id": session['session_id'], "user_id": student_id}, + sort=[("created_at", -1)] + ) + if not quiz: + st.info("No practice quizzes created.") + else: + with st.expander(f"đ Practice Quiz", expanded=False): + # Check if student has already taken this quiz + existing_score = get_student_quiz_score(quiz['_id'], student_id) + + if existing_score is not None: + st.success(f"Quiz completed! Your score: {existing_score:.1f}%") + + # Display correct answers after submission + st.subheader("Quiz Review") + for i, question in enumerate(quiz['questions']): + st.markdown(f"**Question {i+1}:** {question['question']}") + for opt in question['options']: + if opt.startswith(question['correct_option']): + st.markdown(f"â {opt}") + else: + st.markdown(f"- {opt}") + + else: + # Initialize quiz state for this specific quiz + quiz_key = f"quiz_{quiz['_id']}_student_{student_id}" + if quiz_key not in st.session_state: + st.session_state[quiz_key] = { + 'submitted': False, + 'score': None, + 'answers': {} + } + + # If quiz was just submitted, show the results + if st.session_state[quiz_key]['submitted']: + st.success(f"Quiz submitted successfully! Your score: {st.session_state[quiz_key]['score']:.1f}%") + # Reset the quiz state + st.session_state[quiz_key]['submitted'] = False + + + # Display quiz questions + st.write("Please select your answers:") + + # Create a form for quiz submission + form_key = f"quiz_form_{quiz['_id']}_student_{student_id}" + with st.form(key=form_key): + student_answers = {} + + for i, question in enumerate(quiz['questions']): + st.markdown(f"**Question {i+1}:** {question['question']}") + options = [opt for opt in question['options']] + # student_answers[str(i)] = st.radio( + # f"Select answer for question {i+1}:", + # options=options, + # key=f"q_{i}", + # index=None + # ) + answer = st.radio( + f"Select answer for question {i+1}:", + options=options, + key=f"{quiz['_id']}_{i}", # Simplify the radio button key + index=None + ) + if answer: # Only add to answers if a selection was made + student_answers[str(i)] = answer + + # Submit button + # submitted = st.form_submit_button("Submit Quiz") + print("Before the submit button") + submit_button = st.form_submit_button("Submit Quiz") + print("After the submit button") + if submit_button and student_answers: + print("Clicked the button") + print(student_answers) + correct_answers = 0 + for i, question in enumerate(quiz['questions']): + if student_answers[str(i)] == question['correct_option']: + correct_answers += 1 + score = (correct_answers / len(quiz['questions'])) * 100 + + if score is not None: + st.success(f"Quiz submitted successfully! 
Your score: {score:.1f}%") + st.session_state[quiz_key]['submitted'] = True + st.session_state[quiz_key]['score'] = score + st.session_state[quiz_key]['answers'] = student_answers + # This will trigger a rerun, but now we'll handle it properly + st.rerun() + + else: + st.error("Error submitting quiz. Please try again.") + # correct_answers = 0 + # for i, question in enumerate(quiz['questions']): + # if student_answers[str(i)] == question['correct_option']: + # correct_answers += 1 + # score = (correct_answers / len(quiz['questions'])) * 100 + # print(score) + # try: + # quizzes_collection.update_one( + # {"_id": quiz['_id']}, + # {"$push": {"submissions": {"student_id": student_id, "score": score}}} + # ) + # st.success(f"Quiz submitted successfully! Your score: {score:.1f}%") + # except Exception as db_error: + # st.error(f"Error saving submission: {str(db_error)}") + +import requests + +def get_supported_url_formats(): + """Return a list of supported URL formats for faculty reference""" + return """ + Supported YouTube URL formats: + 1. Standard watch URL: https://www.youtube.com/watch?v=VIDEO_ID + 2. Short URL: https://youtu.be/VIDEO_ID + 3. Embed URL: https://www.youtube.com/embed/VIDEO_ID + 4. Mobile URL: https://m.youtube.com/watch?v=VIDEO_ID + 5. YouTube Shorts: https://www.youtube.com/shorts/VIDEO_ID + + You can copy any of these formats from: + - YouTube website (Share button) + - YouTube mobile app (Share button) + - Browser address bar while watching the video + """ + + +def display_url_guidance(): + """Display guidance for faculty on how to get the correct URL""" + st.info(""" + đ How to get the correct YouTube URL: + 1. Go to the YouTube video you want to share + 2. Click the 'Share' button below the video + 3. Copy the URL provided in the share dialog + 4. Paste it here + + The URL should start with either 'youtube.com' or 'youtu.be' + """) +def fetch_youtube_video_title(video_url): + """ + Fetch the title of a YouTube video with detailed error handling + """ + api_key = os.getenv("YOUTUBE_API_KEY") + if not api_key: + st.error("â ī¸ System Configuration Error: YouTube API key not configured.") + st.write("Please contact technical support for assistance.") + return None + + video_id = extract_youtube_id(video_url) + if not video_id: + return None + + url = f"https://www.googleapis.com/youtube/v3/videos?id={video_id}&key={api_key}&part=snippet" + try: + response = requests.get(url, timeout=10) + response.raise_for_status() + + data = response.json() + if not data.get("items"): + st.error("â ī¸ Video not found or might be private.") + st.write(""" + Please check if: + 1. The video is publicly available + 2. The URL is correct + 3. The video hasn't been deleted + """) + return None + + return data["items"][0]["snippet"]["title"] + + except requests.exceptions.RequestException as e: + if "quotaExceeded" in str(e): + st.error("â ī¸ YouTube API quota exceeded.") + st.write(""" + The system has reached its daily limit for video processing. + Please try: + 1. Waiting a few hours + 2. Trying again tomorrow + 3. 
Contact support if the issue persists + """) + else: + st.error(f"Error fetching video title: {str(e)}") + st.write("Please try again or choose a different video.") + return None + +def upload_video_source(course_id, session_id, video_url): + """ + Upload video source and its transcript with comprehensive error handling + """ + if not video_url: + st.error("Please provide a YouTube URL.") + display_url_guidance() + return None + + # Display processing message + # with st.spinner("Processing your YouTube video..."): + # Validate video URL + video_id = extract_youtube_id(video_url) + if not video_id: + return None + + # Fetch video title + video_title = fetch_youtube_video_title(video_url) + if not video_title: + return None + + # Extract transcript + transcript = extract_youtube_transcript(video_url) + if not transcript: + return None + + # Create resource document + resource_data = { + "_id": ObjectId(), + "course_id": course_id, + "session_id": session_id, + "file_name": video_title, + "file_type": "video", + "text_content": transcript, + "material_type": "video", + "source_url": video_url, + "uploaded_at": datetime.utcnow(), + "video_id": video_id + } + + # Check if resource already exists + existing_resource = resources_collection.find_one({ + "session_id": session_id, + "video_id": video_id + }) + + if existing_resource: + st.warning("â ī¸ This video has already been added to this session.") + st.write(""" + Options: + 1. Choose a different video + 2. Use the existing video resource + 3. Remove the existing video first if you want to re-add it + """) + return existing_resource["_id"] + + try: + # Insert new resource + result = resources_collection.insert_one(resource_data) + resource_id = result.inserted_id + + # Update course document + update_result = courses_collection.update_one( + { + "course_id": course_id, + "sessions.session_id": session_id + }, + { + "$push": {"sessions.$.pre_class.resources": resource_id} + } + ) + + if update_result.modified_count == 0: + st.error("â ī¸ Failed to update course with new resource.") + st.write(""" + The video was processed but couldn't be added to the course. + This might be because: + 1. The course or session ID is invalid + 2. You don't have permission to modify this course + 3. There was a system error + + Please try again or contact support if the issue persists. + """) + # Rollback resource insertion + resources_collection.delete_one({"_id": resource_id}) + return None + + # Create vector store for the transcript + # create_vector_store(transcript, resource_id) + # Create vector store for the transcript + vector_store_result = create_vector_store(transcript, resource_id) + if not vector_store_result: + st.error("â ī¸ Failed to create vector store for the transcript.") + # Rollback insertions + resources_collection.delete_one({"_id": resource_id}) + return None + + st.success("â Video successfully added to your course!") + st.write(f""" + Added: "{video_title}" + You can now: + 1. Add more videos + 2. Preview the added video + 3. Continue building your course + """) + return resource_id + + except Exception as e: + st.error("â ī¸ Error uploading video source.") + st.write(f""" + There was an error while saving the video: + {str(e)} + + Please: + 1. Try again + 2. Choose a different video + 3. 
Contact support if the issue persists + """) + return None + +def upload_preclass_materials(session_id, course_id): + """Upload pre-class materials and manage external resources for a session""" + st.subheader("Pre-class Materials Management") + + # Create tabs for different functionalities + upload_tab, videos_tab, external_tab= st.tabs(["Upload Materials","Upload Video Sources","External Resources"]) + + with upload_tab: + # Original file upload functionality + uploaded_file = st.file_uploader("Upload Material", type=['txt', 'pdf', 'docx']) + if uploaded_file is not None: + with st.spinner("Processing document..."): + file_name = uploaded_file.name + file_content = extract_text_from_file(uploaded_file) + if file_content: + material_type = st.selectbox("Select Material Type", ["pdf", "docx", "txt"]) + if st.button("Upload Material"): + upload_resource(course_id, session_id, file_name, uploaded_file, material_type) + st.success("Material uploaded successfully!") + with videos_tab: + # Upload video sources + st.info("Upload video sources for this session.") + video_url = st.text_input("Enter a Youtube Video URL") + if st.button("Upload Video"): + with st.spinner("Processing video source..."): + video_resource_id = upload_video_source(course_id, session_id, video_url) + # if video_resource_id: + # st.success("Video source uploaded successfully!") + + with external_tab: + # Fetch and display external resources + session_data = courses_collection.find_one( + {"course_id": course_id, "sessions.session_id": session_id}, + {"sessions.$": 1} + ) + + if session_data and session_data.get('sessions'): + session = session_data['sessions'][0] + external = session.get('external_resources', {}) + + # Display web articles + if 'readings' in external: + st.subheader("Web Articles and Videos") + for reading in external['readings']: + col1, col2 = st.columns([3, 1]) + with col1: + st.markdown(f"**{reading['title']}**") + st.markdown(f"Type: {reading['type']} | Est. 
time: {reading['estimated_read_time']}") + st.markdown(f"URL: [{reading['url']}]({reading['url']})") + with col2: + if st.button("Extract Content", key=f"extract_{reading['url']}"): + with st.spinner("Extracting content..."): + content = extract_external_content(reading['url'], reading['type']) + if content: + resource_id = upload_external_resource( + course_id, + session_id, + reading['title'], + content, + reading['type'].lower(), + reading['url'] + ) + st.success("Content extracted and stored successfully!") + + # Display books + if 'books' in external: + st.subheader("Recommended Books") + for book in external['books']: + st.markdown(f""" + **{book['title']}** by {book['author']} + - ISBN: {book['isbn']} + - Chapters: {book['chapters']} + """) + + # Display additional resources + if 'additional_resources' in external: + st.subheader("Additional Resources") + for resource in external['additional_resources']: + st.markdown(f""" + **{resource['title']}** ({resource['type']}) + - {resource['description']} + - URL: [{resource['url']}]({resource['url']}) + """) + + # Display pre-class materials + # Group resources by their types + grouped_resources = defaultdict(list) + materials = resources_collection.find({"session_id": session_id}) + for material in materials: + grouped_resources[material['material_type']].append(material) + + # Display grouped resources + for material_type, resources in grouped_resources.items(): + st.markdown(f"##### {material_type.capitalize()} Resources") + for material in resources: + resource_info = f"- **{material['file_name']}** ({material['file_type']})" + if 'source_url' in material: + resource_info += f" - [URL]({material['source_url']})" + st.markdown(resource_info) + +def extract_external_content(url, content_type): + """Extract content from external resources based on their type""" + try: + if content_type.lower() == 'video' and 'youtube.com' in url: + return extract_youtube_transcript(url) + else: + return extract_web_article(url) + except Exception as e: + st.error(f"Error extracting content: {str(e)}") + return None + +def extract_youtube_transcript(url): + """ + Extract transcript from YouTube videos with detailed error handling + """ + try: + video_id = extract_youtube_id(url) + if not video_id: + return None + + # Get transcript with retries + max_retries = 3 + for attempt in range(max_retries): + try: + transcript = YouTubeTranscriptApi.get_transcript(video_id) + # Combine transcript text with proper spacing and punctuation + full_text = '' + for entry in transcript: + text = entry['text'].strip() + if text: + if not full_text.endswith(('.', '!', '?', '..."')): + full_text += '. ' + full_text += text + ' ' + return full_text.strip() + except Exception as e: + if attempt == max_retries - 1: + raise e + continue + + except Exception as e: + error_message = str(e) + if "Video unavailable" in error_message: + st.error("â ī¸ This video is unavailable or private. Please check if:") + st.write(""" + - The video is set to public or unlisted + - The video hasn't been deleted + - You have the correct URL + """) + elif "Subtitles are disabled" in error_message: + st.error("â ī¸ This video doesn't have subtitles/transcript available.") + st.write(""" + Unfortunately, this video cannot be used because: + - It doesn't have closed captions or subtitles + - The creator hasn't enabled transcript generation + + Please choose another video that has subtitles available. + You can check if a video has subtitles by: + 1. Playing the video on YouTube + 2. 
Clicking the 'CC' button in the video player + """) + else: + st.error(f"Could not extract YouTube transcript: {error_message}") + st.write("Please try again or choose a different video.") + return None + +def extract_web_article(url): + """Extract text content from web articles""" + try: + headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' + } + response = requests.get(url, headers=headers) + response.raise_for_status() + + soup = BeautifulSoup(response.text, 'html.parser') + + # Remove unwanted tags + for tag in soup(['script', 'style', 'nav', 'footer', 'header']): + tag.decompose() + + # Extract text from paragraphs + paragraphs = soup.find_all('p') + text_content = ' '.join([p.get_text().strip() for p in paragraphs]) + + return text_content + except Exception as e: + st.error(f"Could not extract web article content: {str(e)}") + return None + +def upload_external_resource(course_id, session_id, title, content, content_type, source_url): + """Upload extracted external resource content to the database""" + resource_data = { + "_id": ObjectId(), + "course_id": course_id, + "session_id": session_id, + "file_name": f"{title} ({content_type})", + "file_type": "external", + "text_content": content, + "material_type": content_type, + "source_url": source_url, + "uploaded_at": datetime.utcnow() + } + + # Check if resource already exists + existing_resource = resources_collection.find_one({ + "session_id": session_id, + "source_url": source_url + }) + + if existing_resource: + return existing_resource["_id"] + + # Insert new resource + resources_collection.insert_one(resource_data) + resource_id = resource_data["_id"] + + # Update course document + courses_collection.update_one( + { + "course_id": course_id, + "sessions.session_id": session_id + }, + { + "$push": {"sessions.$.pre_class.resources": resource_id} + } + ) + + if content: + create_vector_store(content, resource_id) + + return resource_id + +def extract_youtube_id(url): + """ + Extract YouTube video ID from various URL formats + """ + if not url: + st.error("Please provide a YouTube URL.") + display_url_guidance() + return None + + # Clean the URL + url = url.strip() + + # Basic URL validation + if not ('youtube.com' in url or 'youtu.be' in url): + st.error("This doesn't appear to be a YouTube URL.") + st.write(get_supported_url_formats()) + return None + + # Try to extract using regex patterns + patterns = [ + r'(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/|youtube\.com\/v\/|youtube\.com\/e\/|youtube\.com\/shorts\/)([^&\n?#]+)', + r'(?:youtube\.com\/(?:[^\/]+\/.+\/|(?:v|e(?:mbed)?)\/|.*[?&]v=)|youtu\.be\/)([^"&?\/\s]{11})' + ] + + for pattern in patterns: + match = re.search(pattern, url) + if match: + video_id = match.group(1) + if len(video_id) != 11: # YouTube IDs are always 11 characters + st.error("Invalid YouTube video ID length. 
Please check your URL.") + display_url_guidance() + return None + return video_id + + # If regex fails, try parsing URL components + try: + parsed_url = urlparse(url) + if 'youtube.com' in parsed_url.netloc: + query_params = parse_qs(parsed_url.query) + if 'v' in query_params: + return query_params['v'][0] + elif 'youtu.be' in parsed_url.netloc: + return parsed_url.path.lstrip('/') + except Exception: + pass + + # If all extraction methods fail + st.error("Could not extract video ID from the provided URL.") + st.write(get_supported_url_formats()) + return None + +def display_live_presentation(session, user_type, course_id): + st.markdown("### Live Presentation") + + # Get active presentation + session_data = courses_collection.find_one( + {"course_id": course_id, "sessions.session_id": session['session_id']}, + {"sessions.$": 1} + ) + active_presentation = session_data["sessions"][0].get("in_class", {}).get("active_presentation") + + # Faculty Interface + if user_type == 'faculty': + if not active_presentation: + st.markdown(""" + + """, unsafe_allow_html=True) + + # URL input section + with st.container(): + ppt_url = st.text_input("đ Enter Google Slides Presentation URL", + placeholder="https://docs.google.com/presentation/...") + + if ppt_url: + if st.button("âļī¸ Activate Presentation", + use_container_width=True): + courses_collection.update_one( + {"course_id": course_id, "sessions.session_id": session['session_id']}, + {"$set": {"sessions.$.in_class.active_presentation": ppt_url}} + ) + st.success("â Presentation activated successfully!") + st.rerun() + else: + # Display active presentation + st.markdown("#### đ¯ Active Presentation") + components.iframe(active_presentation, height=800) + + # Deactivate button + st.markdown("
", unsafe_allow_html=True) + if st.button("âšī¸ Deactivate Presentation", + type="secondary", + use_container_width=True): + courses_collection.update_one( + {"course_id": course_id, "sessions.session_id": session['session_id']}, + {"$unset": {"sessions.$.in_class.active_presentation": ""}} + ) + st.success("â Presentation deactivated successfully!") + st.rerun() + + # Student Interface + else: + if active_presentation: + st.markdown("#### đ¯ Active Presentation") + components.iframe(active_presentation, height=800) + else: + st.info("đ No active presentations at this time.") + +def display_in_class_content(session, user_type, course_id, user_id): + # """Display in-class activities and interactions""" + """Display in-class activities and interactions""" + st.header("In-class Activities") + + # Initialize Live Polls feature + live_polls = LivePollFeature() + + # Display appropriate interface based on user role + if user_type == 'faculty': + live_polls.display_faculty_interface(session['session_id']) + else: + live_polls.display_student_interface(session['session_id']) + + display_live_chat_interface(session, user_id, course_id=course_id) + + # Live Presentation Feature + display_live_presentation(session, user_type, course_id) + +def generate_random_assignment_id(): + """Generate a random integer ID for assignments""" + return random.randint(100000, 999999) + +def display_post_class_content(session, student_id, course_id): + """Display post-class assignments and submissions""" + st.header("Post-class Work") + + if st.session_state.user_type == 'faculty': + faculty_id = st.session_state.user_id + st.subheader("Create Subjective Test") + + # Create a form for test generation + with st.form("create_subjective_test_form"): + test_title = st.text_input("Test Title") + num_subjective_questions = st.number_input("Number of Subjective Questions", min_value=1, value=5) + generation_method = st.radio( + "Question Generation Method", + ["Generate from Pre-class Materials", "Generate Random Questions"] + ) + generate_test_btn = st.form_submit_button("Generate Test") + + # Handle test generation outside the form + if generate_test_btn: + if not test_title: + st.error("Please enter a test title.") + return + + context = "" + if generation_method == "Generate from Pre-class Materials": + materials = resources_collection.find({"session_id": session['session_id']}) + for material in materials: + if 'text_content' in material: + context += material['text_content'] + "\n" + + with st.spinner("Generating questions and synoptic..."): + try: + # Store generated content in session state to persist between rerenders + questions = generate_questions( + context if context else None, + num_subjective_questions, + session['title'], + session.get('description', '') + ) + + if questions: + synoptic = generate_synoptic( + questions, + context if context else None, + session['title'], + num_subjective_questions + ) + + if synoptic: + # Store in session state + st.session_state.generated_questions = questions + st.session_state.generated_synoptic = synoptic + st.session_state.test_title = test_title + + # Display preview + st.subheader("Preview Subjective Questions and Synoptic") + for i, (q, s) in enumerate(zip(questions, synoptic), 1): + st.markdown(f"**Question {i}:** {q['question']}") + with st.expander(f"View Synoptic {i}"): + st.markdown(s) + + # Save button outside the form + if st.button("Save Test"): + test_id = save_subjective_test( + course_id, + session['session_id'], + test_title, + questions + ) + if test_id: 
+ st.success("Subjective test saved successfully!") + else: + st.error("Error saving subjective test.") + else: + st.error("Error generating synoptic answers. Please try again.") + else: + st.error("Error generating questions. Please try again.") + except Exception as e: + st.error(f"An error occurred: {str(e)}") + + # Display previously generated test if it exists in session state + elif hasattr(st.session_state, 'generated_questions') and hasattr(st.session_state, 'generated_synoptic'): + st.subheader("Preview Subjective Questions and Synoptic") + for i, (q, s) in enumerate(zip(st.session_state.generated_questions, st.session_state.generated_synoptic), 1): + st.markdown(f"**Question {i}:** {q['question']}") + with st.expander(f"View Synoptic {i}"): + st.markdown(s) + + if st.button("Save Test"): + test_id = save_subjective_test( + course_id, + session['session_id'], + st.session_state.test_title, + st.session_state.generated_questions, + ) + if test_id: + st.success("Subjective test saved successfully!") + # Clear session state after successful save + del st.session_state.generated_questions + del st.session_state.generated_synoptic + del st.session_state.test_title + else: + st.error("Error saving subjective test.") + + # st.subheader("Create quiz section UI for faculty") + st.subheader("Create Quiz") + + questions = [] + with st.form("create_quiz_form"): + quiz_title = st.text_input("Quiz Title") + num_questions = st.number_input("Number of Questions", min_value=1, max_value=20, value=5) + + # Option to choose quiz generation method + generation_method = st.radio( + "Question Generation Method", + ["Generate from Pre-class Materials", "Generate Random Questions"] + ) + + submit_quiz = st.form_submit_button("Generate Quiz") + if submit_quiz: + if generation_method == "Generate from Pre-class Materials": + # Get pre-class materials from resources_collection + materials = resources_collection.find({"session_id": session['session_id']}) + context = "" + for material in materials: + if 'text_content' in material: + context += material['text_content'] + "\n" + + if not context: + st.error("No pre-class materials found for this session.") + return + + # Generate MCQs from context + questions = generate_mcqs(context, num_questions, session['title'], session.get('description', '')) + else: + # Generate random MCQs based on session title and description + questions = generate_mcqs(None, num_questions, session['title'], session.get('description', '')) + print(questions) + + if questions: + # Preview generated questions + st.subheader("Preview Generated Questions") + for i, q in enumerate(questions, 1): + st.markdown(f"**Question {i}:** {q['question']}") + for opt in q['options']: + st.markdown(f"- {opt}") + st.markdown(f"*Correct Answer: {q['correct_option']}*") + + # Save quiz + quiz_id = save_quiz(course_id, session['session_id'], quiz_title, questions, faculty_id) + if quiz_id: + st.success("Quiz saved successfully!") + else: + st.error("Error saving quiz.") + + st.subheader("Add Assignment") + with st.form("add_assignment_form"): + title = st.text_input("Assignment Title") + description = st.text_area("Assignment Description") + due_date = st.date_input("Due Date") + submit = st.form_submit_button("Add Assignment") + + if submit: + if not title or not description: + st.error("Please fill in all required fields.") + return + + due_date = datetime.combine(due_date, datetime.min.time()) + assignment = { + "_id": ObjectId(), + "title": title, + "description": description, + "due_date": due_date, 
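                # The remaining fields link the assignment to its course, session,
                # and creating faculty member, and initialize its status and submissions.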
+ "course_id": course_id, + "session_id": session['session_id'], + "faculty_id": faculty_id, + "created_at": datetime.utcnow(), + "status": "active", + "submissions": [] + } + + assignments_collection.insert_one(assignment) + st.success("Assignment added successfully!") + + st.subheader("Existing Assignments") + assignments = assignments_collection.find({ + "session_id": session['session_id'], + "course_id": course_id + }) + + for assignment in assignments: + with st.expander(f"đ {assignment['title']}", expanded=True): + st.markdown(f"**Due Date:** {assignment['due_date'].strftime('%Y-%m-%d')}") + st.markdown(f"**Description:** {assignment['description']}") + + total_submissions = len(assignment.get('submissions', [])) + total_students = students_collection.count_documents({ + "enrolled_courses": { + "$elemMatch": {"course_id": course_id} + } + }) + + col1, col2, col3 = st.columns(3) + with col1: + st.metric("Total Submissions", total_submissions) + with col2: + submission_rate = (total_submissions / total_students * 100) if total_students > 0 else 0 + st.metric("Submission Rate", f"{submission_rate:.1f}%") + with col3: + st.metric("Pending Submissions", total_students - total_submissions) + + # Display evaluation button and status + evaluation_status = st.empty() + eval_button = st.button("View/Generate Evaluations", key=f"eval_{assignment['_id']}") + + if eval_button: + st.session_state.show_evaluations = True + st.session_state.current_assignment = assignment['_id'] + + # Show evaluation interface in a new container instead of an expander + evaluation_container = st.container() + with evaluation_container: + from assignment_evaluation import display_evaluation_to_faculty + display_evaluation_to_faculty(session['session_id'], student_id, course_id) + + else: # Student view + assignments = assignments_collection.find({ + "session_id": session['session_id'], + "course_id": course_id, + "status": "active" + }) + + for assignment in assignments: + with st.expander(f"đ {assignment['title']}", expanded=True): + st.markdown(f"**Due Date:** {assignment['due_date'].strftime('%Y-%m-%d')}") + st.markdown(f"**Description:** {assignment['description']}") + + existing_submission = next( + (sub for sub in assignment.get('submissions', []) + if sub['student_id'] == str(student_id)), + None + ) + + if existing_submission: + st.success("Assignment submitted!") + st.markdown(f"**Submitted on:** {existing_submission['submitted_at'].strftime('%Y-%m-%d %H:%M')}") + + # Show evaluation status and feedback in the same container + evaluation = assignment_evaluation_collection.find_one({ + "assignment_id": assignment['_id'], + "student_id": str(student_id) + }) + + if evaluation: + st.markdown("### Evaluation") + st.markdown(evaluation['evaluation']) + else: + st.info("Evaluation pending. 
Check back later.") + else: + uploaded_file = st.file_uploader( + "Upload your work", + type=['pdf', 'doc', 'docx', 'txt', 'py', 'ipynb', 'ppt', 'pptx'], + key=f"upload_{assignment['_id']}" + ) + + if uploaded_file is not None: + if st.button("Submit Assignment", key=f"submit_{assignment['_id']}"): + text_content = extract_text_from_file(uploaded_file) + + submission = { + "student_id": str(student_id), + "file_name": uploaded_file.name, + "file_type": uploaded_file.type, + "file_content": uploaded_file.getvalue(), + "text_content": text_content, + "submitted_at": datetime.utcnow() + } + + assignments_collection.update_one( + {"_id": assignment['_id']}, + {"$push": {"submissions": submission}} + ) + + st.success("Assignment submitted successfully!") + st.rerun() + +def display_inclass_analytics(session, course_id): + """Display in-class analytics for faculty""" + st.subheader("In-class Analytics") + + # Get all enrolled students count for participation rate calculation + total_students = students_collection.count_documents({ + "enrolled_courses": { + "$elemMatch": {"course_id": course_id} + } + }) + + if total_students == 0: + st.warning("No students enrolled in this course.") + return + + # Get all polls for this session + polls = polls_collection.find({ + "session_id": session['session_id'] + }) + + polls_list = list(polls) + if not polls_list: + st.warning("No polls have been conducted in this session yet.") + return + + # Overall Poll Participation Metrics + st.markdown("### Overall Poll Participation") + + # Calculate overall participation metrics + total_polls = len(polls_list) + participating_students = set() + poll_participation_data = [] + + for poll in polls_list: + respondents = set(poll.get('respondents', [])) + participating_students.update(respondents) + poll_participation_data.append({ + 'Poll Title': poll.get('question', 'Untitled Poll'), + 'Respondents': len(respondents), + 'Participation Rate': (len(respondents) / total_students * 100) + }) + + # Display summary metrics + col1, col2, col3 = st.columns(3) + with col1: + st.metric("Total Polls Conducted", total_polls) + with col2: + st.metric("Active Participants", len(participating_students)) + with col3: + avg_participation = sum(p['Participation Rate'] for p in poll_participation_data) / total_polls + st.metric("Average Participation Rate", f"{avg_participation:.1f}%") + + # Participation Trend Graph + # st.markdown("### Poll Participation Trends") + # participation_df = pd.DataFrame(poll_participation_data) + + # # Create line chart for participation trends + # fig = px.line(participation_df, + # x='Poll Title', + # y='Participation Rate', + # title='Poll Participation Rates Over Time', + # markers=True) + # fig.update_layout( + # xaxis_title="Polls", + # yaxis_title="Participation Rate (%)", + # yaxis_range=[0, 100] + # ) + # st.plotly_chart(fig) + + # Individual Poll Results + st.markdown("### Individual Poll Results") + + for poll in polls_list: + with st.expander(f"đ {poll.get('question', 'Untitled Poll')}"): + responses = poll.get('responses', {}) + respondents = poll.get('respondents', []) + + # Calculate metrics for this poll + response_count = len(respondents) + participation_rate = (response_count / total_students) * 100 + + # Display poll metrics + col1, col2 = st.columns(2) + with col1: + st.metric("Total Responses", response_count) + with col2: + st.metric("Participation Rate", f"{participation_rate:.1f}%") + + if responses: + # Create DataFrame for responses + response_df = 
pd.DataFrame(list(responses.items()), + columns=['Option', 'Votes']) + response_df['Percentage'] = (response_df['Votes'] / response_df['Votes'].sum() * 100).round(1) + + # Display response distribution + fig = px.bar(response_df, + x='Option', + y='Votes', + title='Response Distribution', + text='Percentage') + fig.update_traces(texttemplate='%{text:.1f}%', textposition='outside') + st.plotly_chart(fig) + + # Display detailed response table + st.markdown("#### Detailed Response Breakdown") + response_df['Percentage'] = response_df['Percentage'].apply(lambda x: f"{x}%") + st.table(response_df) + + # Non-participating students + non_participants = list(students_collection.find({ + "courses": course_id, + "_id": {"$nin": respondents} + })) + + if non_participants: + st.markdown("#### Students Who Haven't Participated") + non_participant_data = [{ + 'Name': student.get('name', 'Unknown'), + 'SID': student.get('sid', 'Unknown') + } for student in non_participants] + st.table(pd.DataFrame(non_participant_data)) + + # Export functionality for participation data + st.markdown("### Export Analytics") + + if st.button("Download Poll Analytics Report"): + # Create a more detailed DataFrame for export + export_data = [] + for poll in polls_list: + poll_data = { + 'Poll Question': poll.get('question', 'Untitled'), + 'Total Responses': len(poll.get('respondents', [])), + 'Participation Rate': f"{(len(poll.get('respondents', [])) / total_students * 100):.1f}%" + } + # Add response distribution + for option, votes in poll.get('responses', {}).items(): + poll_data[f"Option: {option}"] = votes + export_data.append(poll_data) + + export_df = pd.DataFrame(export_data) + csv = export_df.to_csv(index=False).encode('utf-8') + st.download_button( + "đĨ Download Complete Report", + csv, + "poll_analytics.csv", + "text/csv", + key='download-csv' + ) + +def display_postclass_analytics(session, course_id): + """Display post-class analytics for faculty""" + st.subheader("Post-class Analytics") + + # Get all assignments for this session + session_data = courses_collection2.find_one( + {"sessions.session_id": session['session_id']}, + {"sessions.$": 1} + ) + + if not session_data or 'sessions' not in session_data: + st.warning("No assignments found for this session.") + return + + assignments = session_data['sessions'][0].get('post_class', {}).get('assignments', []) + + for assignment in assignments: + with st.expander(f"đ Assignment: {assignment.get('title', 'Untitled')}"): + # Get submission analytics + submissions = assignment.get('submissions', []) + # total_students = students_collection.count_documents({"courses": session['course_id']}) + total_students = students_collection.count_documents({ + "enrolled_courses": { + "$elemMatch": {"course_id": course_id} + } + }) + # Calculate submission metrics + submitted_count = len(submissions) + submission_rate = (submitted_count / total_students) * 100 if total_students > 0 else 0 + + # Display metrics + col1, col2, col3 = st.columns(3) + with col1: + st.metric("Submissions Received", submitted_count) + with col2: + st.metric("Submission Rate", f"{submission_rate:.1f}%") + with col3: + st.metric("Pending Submissions", total_students - submitted_count) + + # Display submission timeline + if submissions: + submission_dates = [sub.get('submitted_at') for sub in submissions if 'submitted_at' in sub] + if submission_dates: + df = pd.DataFrame(submission_dates, columns=['Submission Date']) + fig = px.histogram(df, x='Submission Date', + title='Submission Timeline', + 
labels={'Submission Date': 'Date', 'count': 'Number of Submissions'}) + st.plotly_chart(fig) + + # Display submission status breakdown + status_counts = { + 'pending': total_students - submitted_count, + 'submitted': submitted_count, + 'late': len([sub for sub in submissions if sub.get('is_late', False)]) + } + + st.markdown("### Submission Status Breakdown") + status_df = pd.DataFrame(list(status_counts.items()), + columns=['Status', 'Count']) + st.bar_chart(status_df.set_index('Status')) + + # List of students who haven't submitted + if status_counts['pending'] > 0: + st.markdown("### Students with Pending Submissions") + # submitted_ids = [sub.get('student_id') for sub in submissions] + submitted_ids = [ObjectId(sub.get('student_id')) for sub in submissions] + print(submitted_ids) + pending_students = students_collection.find({ + "enrolled_courses.course_id": course_id, + "_id": {"$nin": submitted_ids} + }) + print(pending_students) + for student in pending_students: + st.markdown(f"- {student.get('full_name', 'Unknown Student')} (SID: {student.get('SID', 'Unknown SID')})") + +def get_chat_history(user_id, session_id): + query = { + "user_id": ObjectId(user_id), + "session_id": session_id, + "timestamp": {"$lte": datetime.utcnow()} + } + result = chat_history_collection.find(query) + return list(result) + +def get_response_from_llm(raw_data): + messages = [ + { + "role": "system", + "content": "You are an AI that refines raw analytics data into actionable insights for faculty reports." + }, + { + "role": "user", + "content": f""" + Based on the following analytics data, refine and summarize the insights: + + Raw Data: + {raw_data} + + Instructions: + 1. Group similar topics together under appropriate categories. + 2. Remove irrelevant or repetitive entries. + 3. Summarize the findings into actionable insights. + 4. Provide concise recommendations for improvement based on the findings. + + Output: + Provide a structured response with the following format: + {{ + "Low Engagement Topics": ["List of Topics"], + "Frustration Areas": ["List of areas"], + "Recommendations": ["Actionable recommendations"], + }} + """ + } + ] + try: + client = OpenAI(api_key=OPENAI_API_KEY) + response = client.chat.completions.create( + model="gpt-4o-mini", + messages=messages, + temperature=0.2 + ) + content = response.choices[0].message.content + return json.loads(content) + + except Exception as e: + st.error(f"Error generating response: {str(e)}") + return None + +import typing_extensions as typing +from typing import Union, List, Dict + +# class Topics(typing.TypedDict): +# overarching_theme: List[Dict[str, Union[str, List[Dict[str, Union[str, List[str]]]]]]] +# indirect_topics: List[Dict[str, str]] + +def extract_topics_from_materials(session): + """Extract topics from pre-class materials""" + materials = resources_collection.find({"session_id": session['session_id']}) + texts = "" + if materials: + for material in materials: + if 'text_content' in material: + text = material['text_content'] + texts += text + "\n" + else: + st.warning("No text content found in the material.") + return + else: + st.error("No pre-class materials found for this session.") + return + + if texts: + context_prompt = f""" + Task: Extract Comprehensive Topics in a List Format + You are tasked with analyzing the provided text content and extracting a detailed, flat list of topics. + + Instructions: + Identify All Topics: Extract a comprehensive list of all topics, subtopics, and indirect topics present in the provided text content. 
This list should include: + + Overarching themes + Main topics + Subtopics and their sub-subtopics + Indirectly related topics + Flat List Format: Provide a flat list where each item is a topic. Ensure topics at all levels (overarching, main, sub, sub-sub, indirect) are represented as individual entries in the list. + + Be Exhaustive: Ensure the response captures every topic, subtopic, and indirectly related concept comprehensively. + + Output Requirements: + Use this structure: + {{ + "topics": [ + "Topic 1", + "Topic 2", + "Topic 3", + ... + ] + }} + Do Not Include: Do not include backticks, hierarchical structures, or the word 'json' in your response. + + Content to Analyze: + {texts} + """ + try: + # response = model.generate_content(context_prompt, generation_config=genai.GenerationConfig(response_mime_type="application/json", response_schema=list[Topics])) + response = model.generate_content(context_prompt, generation_config=genai.GenerationConfig(temperature=0.3)) + if not response or not response.text: + st.error("Error extracting topics from materials.") + return + + topics = response.text + return topics + except Exception as e: + st.error(f"Error extracting topics: {str(e)}") + return None + else: + st.error("No text content found in the pre-class materials.") + return None + +def convert_json_to_dict(json_str): + try: + return json.loads(json_str) + except Exception as e: + st.error(f"Error converting JSON to dictionary. {str(e)}") + return None + +# Load topics from a JSON file +# topics = [] +# with open(r'topics.json', 'r') as file: +# topics = json.load(file) + +def get_preclass_analytics(session, course_id): + # Earlier Code: + # """Get all user_ids from chat_history collection where session_id matches""" + # user_ids = chat_history_collection.distinct("user_id", {"session_id": session['session_id']}) + # print(user_ids) + # session_id = session['session_id'] + + # all_chat_histories = [] + + # for user_id in user_ids: + # result = get_chat_history(user_id, session_id) + # if result: + # for record in result: + # chat_history = { + # "user_id": record["user_id"], + # "session_id": record["session_id"], + # "messages": record["messages"] + # } + # all_chat_histories.append(chat_history) + # else: + # st.warning("No chat history found for this session.") + + + # # Pass the pre-class materials content to the analytics engine + # topics = extract_topics_from_materials(session) + # # dict_topics = convert_json_to_dict(topics) + # print(topics) + + # # # Use the 1st analytics engine + # # analytics_engine = NovaScholarAnalytics(all_topics_list=topics) + # # # extracted_topics = analytics_engine._extract_topics(None, topics) + # # # print(extracted_topics) + + # # results = analytics_engine.process_chat_history(all_chat_histories) + # # faculty_report = analytics_engine.generate_faculty_report(results) + # # print(faculty_report) + # # # Pass this Faculty Report to an LLM model for refinements and clarity + # # refined_report = get_response_from_llm(faculty_report) + # # return refined_report + + # # Use the 2nd analytice engine (using LLM): + fallback_analytics = { + "topic_insights": [], + "student_insights": [], + "recommended_actions": [ + { + "action": "Review analytics generation process", + "priority": "high", + "target_group": "system_administrators", + "reasoning": "Analytics generation failed", + "expected_impact": "Restore analytics functionality" + } + ], + "course_health": { + "overall_engagement": 0, + "critical_topics": [], + "class_distribution": { + 
"high_performers": 0, + "average_performers": 0, + "at_risk": 0 + } + }, + "intervention_metrics": { + "immediate_attention_needed": [], + "monitoring_required": [] + } + } + # analytics_generator = NovaScholarAnalytics() + # analytics2 = analytics_generator.generate_analytics(all_chat_histories, topics) + # # enriched_analytics = analytics_generator._enrich_analytics(analytics2) + # print("Analytics is: ", analytics2) + + # if analytics2 == fallback_analytics: + # return None + # else: + # return analytics2 + # # print(json.dumps(analytics, indent=2)) + + + # New Code: + # Debug print 1: Check session + print("Starting get_preclass_analytics with session:", session['session_id']) + + user_ids = chat_history_collection.distinct("user_id", {"session_id": session['session_id']}) + # Debug print 2: Check user_ids + print("Found user_ids:", user_ids) + + all_chat_histories = [] + for user_id in user_ids: + result = get_chat_history(user_id, session['session_id']) + # Debug print 3: Check each chat history result + print(f"Chat history for user {user_id}:", "Found" if result else "Not found") + if result: + for record in result: + chat_history = { + "user_id": record["user_id"], + "session_id": record["session_id"], + "messages": record["messages"] + } + all_chat_histories.append(chat_history) + + # Debug print 4: Check chat histories + print("Total chat histories collected:", len(all_chat_histories)) + + # Extract topics with debug print + topics = extract_topics_from_materials(session) + # Debug print 5: Check topics + print("Extracted topics:", topics) + + if not topics: + print("Topics extraction failed") # Debug print 6 + return None + + analytics_generator = NovaScholarAnalytics() + analytics2 = analytics_generator.generate_analytics(all_chat_histories, topics) + # Debug print 7: Check analytics + print("Generated analytics:", analytics2) + + if analytics2 == fallback_analytics: + print("Fallback analytics returned") # Debug print 8 + return None + else: + try: + courses_collection.update_one( + {"course_id": course_id, "sessions.session_id": session['session_id']}, + {"$set": {"sessions.$.pre_class.analytics": analytics2}} + ) + except Exception as e: + print("Error storing analytics:", str(e)) + return analytics2 + + +# Load Analytics from a JSON file +# analytics = [] +# with open(r'new_analytics2.json', 'r') as file: +# analytics = json.load(file) + +def display_preclass_analytics2(session, course_id): + # Earlier Code: + # Initialize or get analytics data from session state + # if 'analytics_data' not in st.session_state: + # st.session_state.analytics_data = get_preclass_analytics(session) + + # analytics = st.session_state.analytics_data + + # print(analytics) + + + # New Code: + # Initialize or get analytics data from session state + if 'analytics_data' not in st.session_state: + # Add debug prints + analytics_data = get_preclass_analytics(session, course_id) + if analytics_data is None: + st.info("Fetching new analytics data...") + if analytics_data is None: + st.error("Failed to generate analytics. Please check the following:") + st.write("1. Ensure pre-class materials contain text content") + st.write("2. Verify chat history exists for this session") + st.write("3. 
Check if topic extraction was successful")
+            return
+        st.session_state.analytics_data = analytics_data
+
+    analytics = st.session_state.analytics_data
+
+    # Validate analytics data structure
+    if not isinstance(analytics, dict):
+        st.error(f"Invalid analytics data type: {type(analytics)}")
+        return
+
+    required_keys = ["topic_wise_insights", "ai_recommended_actions", "student_analytics"]
+    missing_keys = [key for key in required_keys if key not in analytics]
+    if missing_keys:
+        st.error(f"Missing required keys in analytics data: {missing_keys}")
+        return
+
+    # Initialize topic indices only if we have valid data
+    if 'topic_indices' not in st.session_state:
+        try:
+            st.session_state.topic_indices = list(range(len(analytics["topic_wise_insights"])))
+        except Exception as e:
+            st.error(f"Error creating topic indices: {str(e)}")
+            st.write("Analytics data structure:", analytics)
+            return
+
+    # Enhanced CSS for better styling and interactivity
+    st.markdown("""
+
+    """, unsafe_allow_html=True)
+
+    # Topic-wise Analytics Section
+    st.markdown("## Topic-wise Analytics")
+
+    # AI Recommended Actions
+    for rec in analytics["ai_recommended_actions"]:
+        st.markdown(f'**{rec["action"]}**')
+        st.markdown(f'Reason: {rec["reasoning"]}')
+        st.markdown(f'Expected Outcome: {rec["expected_outcome"]}')
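+    # Sketch (an assumption for illustration, not output captured from NovaScholarAnalytics):
+    # the minimal payload shape that the validation and rendering above depend on, i.e. the
+    # three required keys checked earlier plus the action/reasoning/expected_outcome fields
+    # read from each recommended action. Handy for stubbing st.session_state.analytics_data
+    # when testing the UI without a live analytics run.
+    #
+    # example_analytics = {
+    #     "topic_wise_insights": [],
+    #     "student_analytics": [],
+    #     "ai_recommended_actions": [
+    #         {
+    #             "action": "Revisit the least-engaged topic with a worked example",
+    #             "reasoning": "Chat history shows repeated clarification requests on this topic",
+    #             "expected_outcome": "Improved pre-class comprehension"
+    #         }
+    #     ]
+    # }
+    # st.session_state.analytics_data = example_analytics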