Dannyar608 committed on
Commit 97d65ae · verified
1 Parent(s): 96bd45b

Update app.py

Files changed (1)
  1. app.py +1030 -404
app.py CHANGED
@@ -32,7 +32,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
32
  MODEL_CHOICES = {
33
  "TinyLlama (Fastest)": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
34
  "Phi-2 (Balanced)": "microsoft/phi-2",
35
- "DeepSeek-V3 (Most Powerful)": "deepseek-ai/deepseek-llm-7b"
36
  }
37
  DEFAULT_MODEL = "TinyLlama (Fastest)"
38
 
@@ -273,7 +273,7 @@ class TranscriptParser:
273
 
274
  # Extract additional info
275
  grade_match = re.search(
276
- r"Current Grade: (\d+)\s*\|\s*YOG (\d{4})\s*\|\s*Weighted GPA ([\d.]+)\s*\|\s*Comm Serv Date \d{2}/\d{2}/\d{4}\s*\|\s*Total Credits Earned ([\d.]+)",
277
  text
278
  )
279
  if grade_match:
@@ -357,8 +357,12 @@ class TranscriptParser:
357
 
358
  def parse_transcript_with_ai(text: str, progress=gr.Progress()) -> Dict:
359
  """Use AI model to parse transcript text with progress feedback"""
 
 
 
 
 
360
  try:
361
- # First try structured parsing
362
  progress(0.1, desc="Parsing transcript structure...")
363
  parser = TranscriptParser()
364
  parsed_data = parser.parse_transcript(text)
@@ -394,14 +398,7 @@ def parse_transcript_with_ai(text: str, progress=gr.Progress()) -> Dict:
394
  return parse_transcript_with_ai_fallback(text, progress)
395
 
396
  def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict:
397
- """Fallback AI parsing method when structured parsing fails"""
398
- # Ensure model is loaded
399
- if not model_loader.loaded:
400
- model_loader.load_model(model_loader.current_model or DEFAULT_MODEL, progress)
401
-
402
- if not model_loader.model or not model_loader.tokenizer:
403
- raise gr.Error("AI model failed to load. Please try again or select a different model.")
404
-
405
  # Pre-process the text
406
  text = remove_sensitive_info(text[:15000]) # Limit input size
407
 
@@ -427,10 +424,10 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict
427
  progress(0.1, desc="Processing transcript with AI...")
428
 
429
  # Tokenize and generate response
430
- inputs = model_loader.tokenizer(prompt, return_tensors="pt").to(model_loader.model.device)
431
  progress(0.4)
432
 
433
- outputs = model_loader.model.generate(
434
  **inputs,
435
  max_new_tokens=1500,
436
  temperature=0.1,
@@ -439,7 +436,7 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict
439
  progress(0.8)
440
 
441
  # Decode the response
442
- response = model_loader.tokenizer.decode(outputs[0], skip_special_tokens=True)
443
 
444
  # Extract JSON from response
445
  json_str = response.split('```json')[1].split('```')[0].strip() if '```json' in response else response
@@ -528,7 +525,7 @@ def parse_transcript(file_obj, progress=gr.Progress()) -> Tuple[str, Optional[Di
528
  # Extract text from file
529
  text = extract_text_from_file(file_obj.name, file_ext)
530
 
531
- # Use hybrid parsing approach
532
  parsed_data = parse_transcript_with_ai(text, progress)
533
 
534
  # Format output text
@@ -555,70 +552,182 @@ def parse_transcript(file_obj, progress=gr.Progress()) -> Tuple[str, Optional[Di
555
  class LearningStyleQuiz:
556
  def __init__(self):
557
  self.questions = [
558
- "When learning something new, I prefer to:",
559
- "I remember information best when I:",
560
- "When giving directions, I:",
561
- "When I have to concentrate, I'm most distracted by:",
562
- "I prefer to get new information in:",
563
- "When I'm trying to recall something, I:",
564
- "When I'm angry, I tend to:",
565
- "I tend to:",
566
- "When I meet someone new, I remember:",
567
- "When I'm relaxing, I prefer to:"
 
 
 
 
 
 
 
 
 
 
568
  ]
569
 
570
  self.options = [
571
- ["See diagrams and charts", "Listen to an explanation", "Try it out myself"],
572
- ["See pictures or diagrams", "Hear someone explain it", "Do something with it"],
573
- ["Draw a map", "Give verbal instructions", "Show them how to get there"],
574
- ["Untidiness or movement", "Noises", "Other people moving around"],
575
- ["Written form", "Spoken form", "Demonstration form"],
576
- ["See a mental picture", "Repeat it to myself", "Feel it or move my hands"],
577
- ["Visualize the incident", "Shout and yell", "Stomp around and slam doors"],
578
- ["Talk to myself", "Use my hands when talking", "Move around a lot"],
579
- ["Their face", "Their name", "Something we did together"],
580
- ["Watch TV or read", "Listen to music or talk", "Do something active"]
 
 
 
 
 
 
 
 
 
 
581
  ]
582
 
583
  self.learning_styles = {
584
- "Visual": "You learn best through seeing. Use visual aids like diagrams, charts, and color-coding.",
585
- "Auditory": "You learn best through listening. Record lectures, discuss concepts, and use rhymes or songs.",
586
- "Kinesthetic": "You learn best through movement and touch. Use hands-on activities and take frequent breaks."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
  }
588
 
589
- def get_quiz_questions(self) -> List[Dict]:
590
- """Return formatted questions for the quiz interface"""
591
- return [
592
- {"question": q, "options": opts}
593
- for q, opts in zip(self.questions, self.options)
594
- ]
595
-
596
- def calculate_learning_style(self, answers: List[int]) -> Dict:
597
- """Calculate the learning style based on user answers"""
598
  if len(answers) != len(self.questions):
599
- raise ValueError("Invalid number of answers")
600
 
601
- style_counts = {"Visual": 0, "Auditory": 0, "Kinesthetic": 0}
602
- style_map = {0: "Visual", 1: "Auditory", 2: "Kinesthetic"}
603
 
604
- for answer in answers:
605
- if answer not in [0, 1, 2]:
606
- raise ValueError("Invalid answer value")
607
- style = style_map[answer]
608
  - style_counts[style] += 1
609
 
610
- primary_style = max(style_counts, key=style_counts.get)
611
- secondary_styles = [
612
- style for style, count in style_counts.items()
613
- if style != primary_style and count > 0
614
- ]
615
 
616
- return {
617
- "primary": primary_style,
618
- "secondary": secondary_styles,
619
- "description": self.learning_styles[primary_style],
620
- "scores": style_counts
621
  - }
622
 
623
  # Initialize quiz instance
624
  learning_style_quiz = LearningStyleQuiz()
@@ -627,93 +736,193 @@ learning_style_quiz = LearningStyleQuiz()
627
  class ProfileManager:
628
  def __init__(self):
629
  self.profiles_dir = Path(PROFILES_DIR)
630
  - self.profiles_dir.mkdir(exist_ok=True)
631
 
632
- def create_profile(
633
- self,
634
- name: str,
635
- age: int,
636
- grade_level: str,
637
- learning_style: Dict,
638
- transcript_data: Optional[Dict] = None
639
- ) -> str:
640
- """Create a new student profile with all collected data"""
641
  try:
 
642
  name = validate_name(name)
643
  age = validate_age(age)
 
644
 
645
- profile_id = f"{name.lower().replace(' ', '_')}_{age}"
646
- profile_path = self.profiles_dir / f"{profile_id}.json"
647
-
648
- if profile_path.exists():
649
- raise ValueError("Profile already exists")
 
 
 
 
 
 
650
 
651
- profile_data = {
652
- "id": profile_id,
653
  "name": name,
654
  "age": age,
655
- "grade_level": grade_level,
656
- "learning_style": learning_style,
657
- "transcript": transcript_data or {},
658
- "created_at": time.strftime("%Y-%m-%d %H:%M:%S"),
659
- "updated_at": time.strftime("%Y-%m-%d %H:%M:%S")
 
660
  }
661
 
662
- with open(profile_path, 'w') as f:
663
- json.dump(profile_data, f, indent=2)
664
 
665
- return profile_id
 
666
 
667
- except Exception as e:
668
- raise gr.Error(f"Error creating profile: {str(e)}")
669
-
670
- def get_profile(self, profile_id: str) -> Dict:
671
- """Retrieve a student profile by ID"""
672
- try:
673
- profile_path = self.profiles_dir / f"{profile_id}.json"
674
-
675
- if not profile_path.exists():
676
- raise ValueError("Profile not found")
 
677
 
678
- with open(profile_path, 'r') as f:
679
- return json.load(f)
680
-
681
  except Exception as e:
682
- raise gr.Error(f"Error loading profile: {str(e)}")
683
 
684
- def update_profile(self, profile_id: str, updates: Dict) -> Dict:
685
- """Update an existing profile with new data"""
686
  try:
687
- profile = self.get_profile(profile_id)
688
- profile.update(updates)
689
- profile["updated_at"] = time.strftime("%Y-%m-%d %H:%M:%S")
 
690
 
691
- profile_path = self.profiles_dir / f"{profile_id}.json"
692
- with open(profile_path, 'w') as f:
693
- json.dump(profile, f, indent=2)
694
 
695
  - return profile
696
 
697
  except Exception as e:
698
- raise gr.Error(f"Error updating profile: {str(e)}")
 
699
 
700
- def list_profiles(self) -> List[Dict]:
701
- """List all available student profiles"""
702
- try:
703
- profiles = []
704
- for file in self.profiles_dir.glob("*.json"):
705
- with open(file, 'r') as f:
706
- profile = json.load(f)
707
- profiles.append({
708
- "id": profile["id"],
709
- "name": profile["name"],
710
- "age": profile["age"],
711
- "grade_level": profile["grade_level"],
712
- "created_at": profile["created_at"]
713
- })
714
- return sorted(profiles, key=lambda x: x["name"])
715
- except Exception as e:
716
- raise gr.Error(f"Error listing profiles: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
717
 
718
  # Initialize profile manager
719
  profile_manager = ProfileManager()
@@ -721,321 +930,739 @@ profile_manager = ProfileManager()
721
  # ========== AI TEACHING ASSISTANT ==========
722
  class TeachingAssistant:
723
  def __init__(self):
724
- self.model_loader = model_loader
 
725
 
726
- def generate_study_plan(self, profile_data: Dict, progress=gr.Progress()) -> str:
727
- """Generate a personalized study plan based on student profile"""
728
  try:
729
- # Ensure model is loaded
730
- if not self.model_loader.loaded:
731
- self.model_loader.load_model(DEFAULT_MODEL, progress)
 
732
 
733
- learning_style = profile_data.get("learning_style", {})
734
- transcript = profile_data.get("transcript", {})
735
 
736
- # Prepare prompt
737
- prompt = f"""
738
  - Create a personalized study plan for {profile_data['name']}, a {profile_data['age']}-year-old student in grade {profile_data['grade_level']}.
739
 
740
- Learning Style:
741
- - Primary: {learning_style.get('primary', 'Unknown')}
742
- - Description: {learning_style.get('description', 'No learning style information')}
743
 
744
- Academic History:
745
- - Current GPA: {transcript.get('gpa', {}).get('weighted', 'N/A')} (weighted)
746
- - Courses Completed: {len(transcript.get('courses', []))}
747
-
748
- Focus on study techniques that match the student's learning style and provide specific recommendations based on their academic history.
749
- Include:
750
- 1. Daily study routine suggestions
751
- 2. Subject-specific strategies
752
- 3. Recommended resources
753
- 4. Time management tips
754
- 5. Any areas that need improvement
755
-
756
- Format the response with clear headings and bullet points.
757
- """
758
-
759
- progress(0.2, desc="Generating study plan...")
760
-
761
- # Generate response
762
- inputs = self.model_loader.tokenizer(prompt, return_tensors="pt").to(self.model_loader.model.device)
763
- outputs = self.model_loader.model.generate(
764
- **inputs,
765
- max_new_tokens=1000,
766
- temperature=0.7,
767
- do_sample=True
768
- )
769
-
770
- progress(0.8, desc="Formatting response...")
771
-
772
- response = self.model_loader.tokenizer.decode(outputs[0], skip_special_tokens=True)
773
- return self._format_response(response)
774
 
 
 
775
  except Exception as e:
776
- raise gr.Error(f"Error generating study plan: {str(e)}")
 
777
 
778
- def answer_question(self, question: str, context: str = "", progress=gr.Progress()) -> str:
779
- """Answer student questions with optional context"""
780
- try:
781
- if not question.strip():
782
- return "Please ask a question."
783
-
784
- # Ensure model is loaded
785
- if not self.model_loader.loaded:
786
- self.model_loader.load_model(DEFAULT_MODEL, progress)
787
-
788
- prompt = f"""
789
- Answer the following student question in a helpful, educational manner.
790
- {f"Context: {context}" if context else ""}
791
-
792
- Question: {question}
793
-
794
- Provide a clear, concise answer with examples if helpful. Break down complex concepts.
795
- If the question is unclear, ask for clarification.
796
- """
797
-
798
- progress(0.3, desc="Processing question...")
799
-
800
- # Generate response
801
- inputs = self.model_loader.tokenizer(prompt, return_tensors="pt").to(self.model_loader.model.device)
802
- outputs = self.model_loader.model.generate(
803
- **inputs,
804
- max_new_tokens=500,
805
- temperature=0.5,
806
- do_sample=True
807
- )
808
-
809
- progress(0.8, desc="Formatting answer...")
810
-
811
- response = self.model_loader.tokenizer.decode(outputs[0], skip_special_tokens=True)
812
- return self._format_response(response)
813
-
814
- except Exception as e:
815
- raise gr.Error(f"Error answering question: {str(e)}")
816
 
817
- def _format_response(self, text: str) -> str:
818
- """Format the AI response for better readability"""
819
- # Clean up common artifacts
820
- text = text.replace("<|endoftext|>", "").strip()
821
-
822
- # Add markdown formatting if not present
823
- if "#" not in text and "**" not in text:
824
- # Split into paragraphs and add headings
825
- sections = text.split("\n\n")
826
- formatted = []
827
- for section in sections:
828
- if section.strip().endswith(":"):
829
- formatted.append(f"**{section}**")
830
- else:
831
- formatted.append(section)
832
- text = "\n\n".join(formatted)
833
 
834
  - return text
835
 
836
  # Initialize teaching assistant
837
  teaching_assistant = TeachingAssistant()
838
 
839
  # ========== GRADIO INTERFACE ==========
840
  def create_interface():
841
- with gr.Blocks(title="Student Profile Assistant", theme="soft") as app:
842
  - session_token = gr.State(generate_session_token())
843
 
844
- # Tab navigation
845
- with gr.Tabs():
846
- with gr.Tab("Profile Creation"):
 
847
  with gr.Row():
848
  with gr.Column(scale=1):
849
- gr.Markdown("## Student Information")
850
- name_input = gr.Textbox(label="Full Name", placeholder="Enter student's full name")
851
- age_input = gr.Number(label="Age", minimum=MIN_AGE, maximum=MAX_AGE, step=1)
852
- grade_level = gr.Dropdown(
853
- label="Grade Level",
854
- choices=["9", "10", "11", "12", "Other"],
855
- value="9"
856
- )
857
 
858
- gr.Markdown("## Transcript Upload")
859
- file_upload = gr.File(label="Upload Transcript", file_types=ALLOWED_FILE_TYPES)
860
- parse_btn = gr.Button("Parse Transcript")
861
  - transcript_output = gr.Textbox(label="Transcript Summary", interactive=False, lines=10)
862
 
863
  with gr.Column(scale=1):
864
- gr.Markdown("## Learning Style Quiz")
865
- quiz_components = []
866
- for i, question in enumerate(learning_style_quiz.questions):
867
- quiz_components.append(
868
- gr.Radio(
869
- label=question,
870
- choices=learning_style_quiz.options[i],
871
- type="index"
872
- )
873
- )
874
 
875
- quiz_submit = gr.Button("Submit Quiz")
876
  - learning_style_output = gr.JSON(label="Learning Style Results")
877
 
878
- gr.Markdown("## Complete Profile")
879
- create_profile_btn = gr.Button("Create Profile")
880
  - profile_status = gr.Textbox(label="Profile Status", interactive=False)
881
 
882
- with gr.Tab("Study Tools"):
 
883
  with gr.Row():
884
  with gr.Column(scale=1):
885
- gr.Markdown("## Study Plan Generator")
886
- profile_selector = gr.Dropdown(
887
- label="Select Profile",
888
- choices=[p["id"] for p in profile_manager.list_profiles()],
889
- interactive=True
890
- )
891
- refresh_profiles = gr.Button("Refresh Profiles")
892
- study_plan_btn = gr.Button("Generate Study Plan")
893
  - study_plan_output = gr.Markdown(label="Personalized Study Plan")
894
 
895
  with gr.Column(scale=1):
896
- gr.Markdown("## Ask the Teaching Assistant")
897
- question_input = gr.Textbox(label="Your Question", lines=3)
898
- context_input = gr.Textbox(label="Additional Context (optional)", lines=2)
899
- ask_btn = gr.Button("Ask Question")
900
- answer_output = gr.Markdown(label="Answer")
901
-
902
- with gr.Tab("Profile Management"):
903
- gr.Markdown("## Existing Profiles")
904
- profile_table = gr.Dataframe(
905
- headers=["Name", "Age", "Grade Level", "Created At"],
906
- datatype=["str", "number", "str", "str"],
907
- interactive=False
908
- )
909
  - refresh_table = gr.Button("Refresh Profiles")
910
 
911
  with gr.Row():
912
- with gr.Column():
913
- gr.Markdown("## Profile Details")
914
- selected_profile = gr.Dropdown(
915
- label="Select Profile",
916
- choices=[p["id"] for p in profile_manager.list_profiles()],
917
- interactive=True
918
- )
919
- view_profile_btn = gr.Button("View Profile")
920
  - profile_display = gr.JSON(label="Profile Data")
921
 
922
- with gr.Column():
923
- gr.Markdown("## Update Profile")
924
- update_grade = gr.Dropdown(
925
- label="Update Grade Level",
926
- choices=["9", "10", "11", "12", "Other"],
927
- interactive=True
928
  )
929
- update_transcript = gr.File(label="Update Transcript", file_types=ALLOWED_FILE_TYPES)
930
- update_btn = gr.Button("Update Profile")
931
- update_status = gr.Textbox(label="Update Status", interactive=False)
932
-
933
- # ========== EVENT HANDLERS ==========
934
- # Transcript parsing
935
- parse_btn.click(
936
- parse_transcript,
937
- inputs=[file_upload],
938
- outputs=[transcript_output, gr.State()],
939
- show_progress=True
940
- )
941
-
942
- # Learning style quiz
943
- quiz_submit.click(
944
- learning_style_quiz.calculate_learning_style,
945
- inputs=quiz_components,
946
- outputs=learning_style_output
947
  - )
948
 
949
- # Profile creation
950
- create_profile_btn.click(
951
- profile_manager.create_profile,
952
- inputs=[
953
- name_input,
954
- age_input,
955
- grade_level,
956
- learning_style_output,
957
- gr.State()
958
- ],
959
- outputs=profile_status
960
- ).then(
961
- lambda: [p["id"] for p in profile_manager.list_profiles()],
962
- outputs=profile_selector
963
- ).then(
964
- lambda: [p["id"] for p in profile_manager.list_profiles()],
965
- outputs=selected_profile
966
- ).then(
967
- lambda: profile_manager.list_profiles(),
968
- outputs=profile_table
969
- )
970
 
971
- # Study tools
972
- refresh_profiles.click(
973
- lambda: [p["id"] for p in profile_manager.list_profiles()],
974
- outputs=profile_selector
975
  )
976
-
977
- study_plan_btn.click(
978
- lambda profile_id: profile_manager.get_profile(profile_id),
979
- inputs=profile_selector,
980
- outputs=gr.State()
981
- ).then(
982
- teaching_assistant.generate_study_plan,
983
- inputs=gr.State(),
984
- outputs=study_plan_output,
985
- show_progress=True
986
  )
987
-
988
- # Teaching assistant
989
- ask_btn.click(
990
- teaching_assistant.answer_question,
991
- inputs=[question_input, context_input],
992
- outputs=answer_output,
993
- show_progress=True
994
  )
995
-
996
- # Profile management
997
- refresh_table.click(
998
- lambda: profile_manager.list_profiles(),
999
- outputs=profile_table
1000
- ).then(
1001
- lambda: [p["id"] for p in profile_manager.list_profiles()],
1002
- outputs=selected_profile
1003
  )
1004
-
1005
- view_profile_btn.click(
1006
- profile_manager.get_profile,
1007
- inputs=selected_profile,
1008
- outputs=profile_display
1009
  )
1010
 
1011
- update_btn.click(
1012
- lambda profile_id, grade, file_obj: (
1013
- profile_manager.update_profile(
1014
- profile_id,
1015
- {"grade_level": grade}
1016
- ) if not file_obj else None,
1017
- parse_transcript(file_obj) if file_obj else (None, None)
1018
- ),
1019
- inputs=[selected_profile, update_grade, update_transcript],
1020
- outputs=[profile_display, gr.State()]
1021
- ).then(
1022
- lambda: "Profile updated successfully!",
1023
- outputs=update_status
1024
- )
1025
 
1026
- # Initialization
1027
- app.load(
1028
- lambda: profile_manager.list_profiles(),
1029
- outputs=profile_table
1030
- ).then(
1031
- lambda: [p["id"] for p in profile_manager.list_profiles()],
1032
- outputs=profile_selector
1033
- ).then(
1034
- lambda: [p["id"] for p in profile_manager.list_profiles()],
1035
- outputs=selected_profile
1036
  )
1037
-
1038
- return app
1039
 
1040
  # Create the interface
1041
  app = create_interface()
@@ -1043,5 +1670,4 @@ app = create_interface()
1043
  # For Hugging Face Spaces deployment
1044
  if __name__ == "__main__":
1045
  app.launch()
1046
-
1047
 
 
32
  MODEL_CHOICES = {
33
  "TinyLlama (Fastest)": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
34
  "Phi-2 (Balanced)": "microsoft/phi-2",
35
+ "DeepSeek-V3 (Most Powerful)": "deepseek-ai/DeepSeek-V3"
36
  }
37
  DEFAULT_MODEL = "TinyLlama (Fastest)"
38
 
 
273
 
274
  # Extract additional info
275
  grade_match = re.search(
276
+ r"Current Grade: (\d+)\s*\|\s*YOG (\d{4})\s*\|\s*Weighted GPA ([\d.]+)\s*\|\s*Total Credits Earned ([\d.]+)",
277
  text
278
  )
279
  if grade_match:
 
357
 
358
  def parse_transcript_with_ai(text: str, progress=gr.Progress()) -> Dict:
359
  """Use AI model to parse transcript text with progress feedback"""
360
+ model, tokenizer = model_loader.load_model(model_loader.current_model or DEFAULT_MODEL, progress)
361
+ if model is None or tokenizer is None:
362
+ raise gr.Error(f"Model failed to load. {model_loader.error or 'Please try loading a model first.'}")
363
+
364
+ # First try the structured parser
365
  try:
 
366
  progress(0.1, desc="Parsing transcript structure...")
367
  parser = TranscriptParser()
368
  parsed_data = parser.parse_transcript(text)
 
398
  return parse_transcript_with_ai_fallback(text, progress)
399
 
400
  def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict:
401
+ """Fallback AI parsing method"""
 
 
 
 
 
 
 
402
  # Pre-process the text
403
  text = remove_sensitive_info(text[:15000]) # Limit input size
404
 
 
424
  progress(0.1, desc="Processing transcript with AI...")
425
 
426
  # Tokenize and generate response
427
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
428
  progress(0.4)
429
 
430
+ outputs = model.generate(
431
  **inputs,
432
  max_new_tokens=1500,
433
  temperature=0.1,
 
436
  progress(0.8)
437
 
438
  # Decode the response
439
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
440
 
441
  # Extract JSON from response
442
  json_str = response.split('```json')[1].split('```')[0].strip() if '```json' in response else response
 
525
  # Extract text from file
526
  text = extract_text_from_file(file_obj.name, file_ext)
527
 
528
+ # Use AI for parsing
529
  parsed_data = parse_transcript_with_ai(text, progress)
530
 
531
  # Format output text
 
552
  class LearningStyleQuiz:
553
  def __init__(self):
554
  self.questions = [
555
+ "When you study for a test, you prefer to:",
556
+ "When you need directions to a new place, you prefer:",
557
+ "When you learn a new skill, you prefer to:",
558
+ "When you're trying to concentrate, you:",
559
+ "When you meet new people, you remember them by:",
560
+ "When you're assembling furniture or a gadget, you:",
561
+ "When choosing a restaurant, you rely most on:",
562
+ "When you're in a waiting room, you typically:",
563
+ "When giving someone instructions, you tend to:",
564
+ "When you're trying to recall information, you:",
565
+ "When you're at a museum or exhibit, you:",
566
+ "When you're learning a new language, you prefer:",
567
+ "When you're taking notes in class, you:",
568
+ "When you're explaining something complex, you:",
569
+ "When you're at a party, you enjoy:",
570
+ "When you're trying to remember a phone number, you:",
571
+ "When you're relaxing, you prefer to:",
572
+ "When you're learning to use new software, you:",
573
+ "When you're giving a presentation, you rely on:",
574
+ "When you're solving a difficult problem, you:"
575
  ]
576
 
577
  self.options = [
578
+ ["Read the textbook (Reading/Writing)", "Listen to lectures (Auditory)", "Use diagrams/charts (Visual)", "Practice problems (Kinesthetic)"],
579
+ ["Look at a map (Visual)", "Have someone tell you (Auditory)", "Write down directions (Reading/Writing)", "Try walking/driving there (Kinesthetic)"],
580
+ ["Read instructions (Reading/Writing)", "Have someone show you (Visual)", "Listen to explanations (Auditory)", "Try it yourself (Kinesthetic)"],
581
+ ["Need quiet (Reading/Writing)", "Need background noise (Auditory)", "Need to move around (Kinesthetic)", "Need visual stimulation (Visual)"],
582
+ ["Their face (Visual)", "Their name (Auditory)", "What you talked about (Reading/Writing)", "What you did together (Kinesthetic)"],
583
+ ["Read the instructions carefully (Reading/Writing)", "Look at the diagrams (Visual)", "Ask someone to explain (Auditory)", "Start putting pieces together (Kinesthetic)"],
584
+ ["Online photos of the food (Visual)", "Recommendations from friends (Auditory)", "Reading the menu online (Reading/Writing)", "Remembering how it felt to eat there (Kinesthetic)"],
585
+ ["Read magazines (Reading/Writing)", "Listen to music (Auditory)", "Watch TV (Visual)", "Fidget or move around (Kinesthetic)"],
586
+ ["Write them down (Reading/Writing)", "Explain verbally (Auditory)", "Demonstrate (Visual)", "Guide them physically (Kinesthetic)"],
587
+ ["See written words in your mind (Visual)", "Hear the information in your head (Auditory)", "Write it down to remember (Reading/Writing)", "Associate it with physical actions (Kinesthetic)"],
588
+ ["Read all the descriptions (Reading/Writing)", "Listen to audio guides (Auditory)", "Look at the displays (Visual)", "Touch interactive exhibits (Kinesthetic)"],
589
+ ["Study grammar rules (Reading/Writing)", "Listen to native speakers (Auditory)", "Use flashcards with images (Visual)", "Practice conversations (Kinesthetic)"],
590
+ ["Write detailed paragraphs (Reading/Writing)", "Record the lecture (Auditory)", "Draw diagrams and charts (Visual)", "Doodle while listening (Kinesthetic)"],
591
+ ["Write detailed steps (Reading/Writing)", "Explain verbally with examples (Auditory)", "Draw diagrams (Visual)", "Use physical objects to demonstrate (Kinesthetic)"],
592
+ ["Conversations with people (Auditory)", "Watching others or the environment (Visual)", "Writing notes or texting (Reading/Writing)", "Dancing or physical activities (Kinesthetic)"],
593
+ ["See the numbers in your head (Visual)", "Say them aloud (Auditory)", "Write them down (Reading/Writing)", "Dial them on a keypad (Kinesthetic)"],
594
+ ["Read a book (Reading/Writing)", "Listen to music (Auditory)", "Watch TV/movies (Visual)", "Do something physical (Kinesthetic)"],
595
+ ["Read the manual (Reading/Writing)", "Ask someone to show you (Visual)", "Call tech support (Auditory)", "Experiment with the software (Kinesthetic)"],
596
+ ["Detailed notes (Reading/Writing)", "Verbal explanations (Auditory)", "Visual slides (Visual)", "Physical demonstrations (Kinesthetic)"],
597
+ ["Write out possible solutions (Reading/Writing)", "Talk through it with someone (Auditory)", "Draw diagrams (Visual)", "Build a model or prototype (Kinesthetic)"]
598
  ]
599
 
600
  self.learning_styles = {
601
+ "Visual": {
602
+ "description": "Visual learners prefer using images, diagrams, and spatial understanding.",
603
+ "tips": [
604
+ "Use color coding in your notes",
605
+ "Create mind maps and diagrams",
606
+ "Watch educational videos",
607
+ "Use flashcards with images",
608
+ "Highlight important information in different colors"
609
+ ],
610
+ "careers": [
611
+ "Graphic Designer", "Architect", "Photographer",
612
+ "Engineer", "Surgeon", "Pilot"
613
+ ]
614
+ },
615
+ "Auditory": {
616
+ "description": "Auditory learners learn best through listening and speaking.",
617
+ "tips": [
618
+ "Record lectures and listen to them",
619
+ "Participate in study groups",
620
+ "Explain concepts out loud to yourself",
621
+ "Use rhymes or songs to remember information",
622
+ "Listen to educational podcasts"
623
+ ],
624
+ "careers": [
625
+ "Musician", "Journalist", "Lawyer",
626
+ "Psychologist", "Teacher", "Customer Service"
627
+ ]
628
+ },
629
+ "Reading/Writing": {
630
+ "description": "These learners prefer information displayed as words.",
631
+ "tips": [
632
+ "Write detailed notes",
633
+ "Create summaries in your own words",
634
+ "Read textbooks and articles",
635
+ "Make lists to organize information",
636
+ "Rewrite your notes to reinforce learning"
637
+ ],
638
+ "careers": [
639
+ "Writer", "Researcher", "Editor",
640
+ "Accountant", "Programmer", "Historian"
641
+ ]
642
+ },
643
+ "Kinesthetic": {
644
+ "description": "Kinesthetic learners learn through movement and hands-on activities.",
645
+ "tips": [
646
+ "Use hands-on activities",
647
+ "Take frequent movement breaks",
648
+ "Create physical models",
649
+ "Associate information with physical actions",
650
+ "Study while walking or pacing"
651
+ ],
652
+ "careers": [
653
+ "Athlete", "Chef", "Mechanic",
654
+ "Dancer", "Physical Therapist", "Carpenter"
655
+ ]
656
+ }
657
  }
658
 
659
+ def evaluate_quiz(self, *answers) -> str:
660
+ """Evaluate quiz answers and generate enhanced results."""
661
  + answers = list(answers) # Convert tuple to list
662
  if len(answers) != len(self.questions):
663
+ raise gr.Error("Not all questions were answered")
664
 
665
+ scores = {style: 0 for style in self.learning_styles}
 
666
 
667
+ for i, answer in enumerate(answers):
668
+ if not answer:
669
+ continue # Skip unanswered questions
670
+
671
+ for j, style in enumerate(self.learning_styles):
672
+ if answer == self.options[i][j]:
673
+ scores[style] += 1
674
+ break
675
 
676
+ total_answered = sum(1 for ans in answers if ans)
677
+ if total_answered == 0:
678
+ raise gr.Error("No answers provided")
 
 
679
 
680
+ percentages = {style: (score/total_answered)*100 for style, score in scores.items()}
681
+ sorted_styles = sorted(scores.items(), key=lambda x: x[1], reverse=True)
682
+
683
+ # Generate enhanced results report
684
+ result = "## Your Learning Style Results\n\n"
685
+ result += "### Scores:\n"
686
+ for style, score in sorted_styles:
687
+ result += f"- **{style}**: {score}/{total_answered} ({percentages[style]:.1f}%)\n"
688
+
689
+ max_score = max(scores.values())
690
+ primary_styles = [style for style, score in scores.items() if score == max_score]
691
+
692
+ result += "\n### Analysis:\n"
693
+ if len(primary_styles) == 1:
694
+ primary_style = primary_styles[0]
695
+ style_info = self.learning_styles[primary_style]
696
+
697
+ result += f"Your primary learning style is **{primary_style}**\n\n"
698
+ result += f"**{primary_style} Characteristics**:\n"
699
+ result += f"{style_info['description']}\n\n"
700
+
701
+ result += "**Recommended Study Strategies**:\n"
702
+ for tip in style_info['tips']:
703
+ result += f"- {tip}\n"
704
+
705
+ result += "\n**Potential Career Paths**:\n"
706
+ for career in style_info['careers'][:6]:
707
+ result += f"- {career}\n"
708
+
709
+ # Add complementary strategies
710
+ complementary = [s for s in sorted_styles if s[0] != primary_style][0][0]
711
+ result += f"\nYou might also benefit from some **{complementary}** strategies:\n"
712
+ for tip in self.learning_styles[complementary]['tips'][:3]:
713
+ result += f"- {tip}\n"
714
+ else:
715
+ result += "You have multiple strong learning styles:\n"
716
+ for style in primary_styles:
717
+ result += f"- **{style}**\n"
718
+
719
+ result += "\n**Combined Learning Strategies**:\n"
720
+ result += "You may benefit from combining different learning approaches:\n"
721
+ for style in primary_styles:
722
+ result += f"\n**{style}** techniques:\n"
723
+ for tip in self.learning_styles[style]['tips'][:2]:
724
+ result += f"- {tip}\n"
725
+
726
+ result += f"\n**{style}** career suggestions:\n"
727
+ for career in self.learning_styles[style]['careers'][:3]:
728
+ result += f"- {career}\n"
729
+
730
+ return result
731
 
732
  # Initialize quiz instance
733
  learning_style_quiz = LearningStyleQuiz()
 
736
  class ProfileManager:
737
  def __init__(self):
738
  self.profiles_dir = Path(PROFILES_DIR)
739
+ self.profiles_dir.mkdir(exist_ok=True, parents=True)
740
+ self.current_session = None
741
+
742
+ def set_session(self, session_token: str) -> None:
743
+ """Set the current session token."""
744
+ self.current_session = session_token
745
+
746
+ def get_profile_path(self, name: str) -> Path:
747
+ """Get profile path with session token if available."""
748
+ if self.current_session:
749
+ return self.profiles_dir / f"{name.replace(' ', '_')}_{self.current_session}_profile.json"
750
+ return self.profiles_dir / f"{name.replace(' ', '_')}_profile.json"
751
 
752
+ def save_profile(self, name: str, age: Union[int, str], interests: str,
753
+ transcript: Dict, learning_style: str,
754
+ movie: str, movie_reason: str, show: str, show_reason: str,
755
+ book: str, book_reason: str, character: str, character_reason: str,
756
+ blog: str) -> str:
757
+ """Save student profile with validation."""
 
 
 
758
  try:
759
+ # Validate required fields
760
  name = validate_name(name)
761
  age = validate_age(age)
762
+ interests = sanitize_input(interests)
763
 
764
+ # Prepare favorites data
765
+ favorites = {
766
+ "movie": sanitize_input(movie),
767
+ "movie_reason": sanitize_input(movie_reason),
768
+ "show": sanitize_input(show),
769
+ "show_reason": sanitize_input(show_reason),
770
+ "book": sanitize_input(book),
771
+ "book_reason": sanitize_input(book_reason),
772
+ "character": sanitize_input(character),
773
+ "character_reason": sanitize_input(character_reason)
774
+ }
775
 
776
+ # Prepare full profile data
777
+ data = {
778
  "name": name,
779
  "age": age,
780
+ "interests": interests,
781
+ "transcript": transcript if transcript else {},
782
+ "learning_style": learning_style if learning_style else "Not assessed",
783
+ "favorites": favorites,
784
+ "blog": sanitize_input(blog) if blog else "",
785
+ "session_token": self.current_session
786
  }
787
 
788
+ # Save to JSON file
789
+ filepath = self.get_profile_path(name)
790
 
791
+ with open(filepath, "w", encoding='utf-8') as f:
792
+ json.dump(data, f, indent=2, ensure_ascii=False)
793
 
794
+ # Upload to HF Hub if token is available
795
+ if HF_TOKEN:
796
+ try:
797
+ hf_api.upload_file(
798
+ path_or_fileobj=filepath,
799
+ path_in_repo=f"profiles/{filepath.name}",
800
+ repo_id="your-username/student-learning-assistant",
801
+ repo_type="dataset"
802
+ )
803
+ except Exception as e:
804
+ print(f"Failed to upload to HF Hub: {str(e)}")
805
 
806
+ return self._generate_profile_summary(data)
807
+
 
808
  except Exception as e:
809
+ raise gr.Error(f"Error saving profile: {str(e)}")
810
 
811
+ def load_profile(self, name: str = None, session_token: str = None) -> Dict:
812
+ """Load profile by name or return the first one found."""
813
  try:
814
+ if session_token:
815
+ profile_pattern = f"*{session_token}_profile.json"
816
+ else:
817
+ profile_pattern = "*.json"
818
 
819
+ profiles = list(self.profiles_dir.glob(profile_pattern))
820
+ if not profiles:
821
+ return {}
822
 
823
+ if name:
824
+ # Find profile by name
825
+ name = name.replace(" ", "_")
826
+ if session_token:
827
+ profile_file = self.profiles_dir / f"{name}_{session_token}_profile.json"
828
+ else:
829
+ profile_file = self.profiles_dir / f"{name}_profile.json"
830
+
831
+ if not profile_file.exists():
832
+ # Try loading from HF Hub
833
+ if HF_TOKEN:
834
+ try:
835
+ hf_api.download_file(
836
+ path_in_repo=f"profiles/{profile_file.name}",
837
+ repo_id="your-username/student-learning-assistant",
838
+ repo_type="dataset",
839
+ local_dir=self.profiles_dir
840
+ )
841
+ except:
842
+ raise gr.Error(f"No profile found for {name}")
843
+ else:
844
+ raise gr.Error(f"No profile found for {name}")
845
+ else:
846
+ # Load the first profile found
847
+ profile_file = profiles[0]
848
 
849
+ with open(profile_file, "r", encoding='utf-8') as f:
850
+ return json.load(f)
851
+
852
  except Exception as e:
853
+ print(f"Error loading profile: {str(e)}")
854
+ return {}
855
 
856
+ def list_profiles(self, session_token: str = None) -> List[str]:
857
+ """List all available profile names for the current session."""
858
+ if session_token:
859
+ profiles = list(self.profiles_dir.glob(f"*{session_token}_profile.json"))
860
+ else:
861
+ profiles = list(self.profiles_dir.glob("*.json"))
862
+
863
+ # Extract just the name part (without session token)
864
+ profile_names = []
865
+ for p in profiles:
866
+ name_part = p.stem.replace("_profile", "")
867
+ if session_token:
868
+ name_part = name_part.replace(f"_{session_token}", "")
869
+ profile_names.append(name_part.replace("_", " "))
870
+
871
+ return profile_names
872
+
873
+ def _generate_profile_summary(self, data: Dict) -> str:
874
+ """Generate markdown summary of the profile."""
875
+ transcript = data.get("transcript", {})
876
+ favorites = data.get("favorites", {})
877
+ learning_style = data.get("learning_style", "Not assessed")
878
+
879
+ markdown = f"""## Student Profile: {data['name']}
880
+ ### Basic Information
881
+ - **Age:** {data['age']}
882
+ - **Interests:** {data['interests']}
883
+ - **Learning Style:** {learning_style.split('##')[0].strip()}
884
+ ### Academic Information
885
+ {self._format_transcript(transcript)}
886
+ ### Favorites
887
+ - **Movie:** {favorites.get('movie', 'Not specified')}
888
+ *Reason:* {favorites.get('movie_reason', 'Not specified')}
889
+ - **TV Show:** {favorites.get('show', 'Not specified')}
890
+ *Reason:* {favorites.get('show_reason', 'Not specified')}
891
+ - **Book:** {favorites.get('book', 'Not specified')}
892
+ *Reason:* {favorites.get('book_reason', 'Not specified')}
893
+ - **Character:** {favorites.get('character', 'Not specified')}
894
+ *Reason:* {favorites.get('character_reason', 'Not specified')}
895
+ ### Personal Blog
896
+ {data.get('blog', '_No blog provided_')}
897
+ """
898
+ return markdown
899
+
900
+ def _format_transcript(self, transcript: Dict) -> str:
901
+ """Format transcript data for display."""
902
+ if not transcript or "courses" not in transcript:
903
+ return "_No transcript information available_"
904
+
905
+ display = "#### Course History\n"
906
+ courses_by_grade = transcript["courses"]
907
+
908
+ if isinstance(courses_by_grade, dict):
909
+ for grade in sorted(courses_by_grade.keys(), key=lambda x: int(x) if x.isdigit() else x):
910
+ display += f"\n**Grade {grade}**\n"
911
+ for course in courses_by_grade[grade]:
912
+ display += f"- {course.get('code', '')} {course.get('name', 'Unnamed course')}"
913
+ if 'grade' in course and course['grade']:
914
+ display += f" (Grade: {course['grade']})"
915
+ if 'credits' in course:
916
+ display += f" | Credits: {course['credits']}"
917
+ display += f" | Year: {course.get('year', 'N/A')}\n"
918
+
919
+ if 'gpa' in transcript:
920
+ gpa = transcript['gpa']
921
+ display += "\n**GPA**\n"
922
+ display += f"- Unweighted: {gpa.get('unweighted', 'N/A')}\n"
923
+ display += f"- Weighted: {gpa.get('weighted', 'N/A')}\n"
924
+
925
+ return display
926
 
927
  # Initialize profile manager
928
  profile_manager = ProfileManager()
 
930
  # ========== AI TEACHING ASSISTANT ==========
931
  class TeachingAssistant:
932
  def __init__(self):
933
+ self.context_history = []
934
+ self.max_context_length = 5 # Keep last 5 exchanges for context
935
 
936
+ def generate_response(self, message: str, history: List[List[Union[str, None]]], session_token: str) -> str:
937
+ """Generate personalized response based on student profile and context."""
938
  try:
939
+ # Load profile with session token
940
+ profile = profile_manager.load_profile(session_token=session_token)
941
+ if not profile:
942
+ return "Please complete and save your profile first using the previous tabs."
943
 
944
+ # Update context history
945
+ self._update_context(message, history)
946
 
947
+ # Extract profile information
948
+ name = profile.get("name", "there")
949
+ learning_style = profile.get("learning_style", "")
950
+ grade_level = profile.get("transcript", {}).get("grade_level", "unknown")
951
+ gpa = profile.get("transcript", {}).get("gpa", {})
952
+ interests = profile.get("interests", "")
953
+ courses = profile.get("transcript", {}).get("courses", {})
954
+ favorites = profile.get("favorites", {})
955
 
956
+ # Process message with context
957
+ response = self._process_message(message, profile)
 
958
 
959
+ # Add follow-up suggestions
960
+ if "study" in message.lower() or "learn" in message.lower():
961
+ response += "\n\nWould you like me to suggest a study schedule based on your courses?"
962
+ elif "course" in message.lower() or "class" in message.lower():
963
+ response += "\n\nWould you like help finding resources for any of these courses?"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
964
 
965
+ return response
966
+
967
  except Exception as e:
968
+ print(f"Error generating response: {str(e)}")
969
+ return "I encountered an error processing your request. Please try again."
970
 
971
+ def _update_context(self, message: str, history: List[List[Union[str, None]]]) -> None:
972
+ """Maintain conversation context."""
973
+ self.context_history.append({"role": "user", "content": message})
974
+ if history:
975
+ for h in history[-self.max_context_length:]:
976
+ if h[0]: # User message
977
+ self.context_history.append({"role": "user", "content": h[0]})
978
+ if h[1]: # Assistant message
979
+ self.context_history.append({"role": "assistant", "content": h[1]})
980
+
981
+ # Trim to maintain max context length
982
  + self.context_history = self.context_history[-(self.max_context_length*2):]
983
 
984
+ def _process_message(self, message: str, profile: Dict) -> str:
985
+ """Process user message with profile context."""
986
  + message_lower = message.lower()
987
 
988
+ # Greetings
989
+ if any(greet in message_lower for greet in ["hi", "hello", "hey", "greetings"]):
990
+ return f"Hello {profile.get('name', 'there')}! How can I help you with your learning today?"
991
+
992
+ # Study help
993
+ study_words = ["study", "learn", "prepare", "exam", "test", "homework"]
994
+ if any(word in message_lower for word in study_words):
995
+ return self._generate_study_advice(profile)
996
+
997
+ # Grade help
998
+ grade_words = ["grade", "gpa", "score", "marks", "results"]
999
+ if any(word in message_lower for word in grade_words):
1000
+ return self._generate_grade_advice(profile)
1001
+
1002
+ # Interest help
1003
+ interest_words = ["interest", "hobby", "passion", "extracurricular"]
1004
+ if any(word in message_lower for word in interest_words):
1005
+ return self._generate_interest_advice(profile)
1006
+
1007
+ # Course help
1008
+ course_words = ["courses", "classes", "transcript", "schedule", "subject"]
1009
+ if any(word in message_lower for word in course_words):
1010
+ return self._generate_course_advice(profile)
1011
+
1012
+ # Favorites
1013
+ favorite_words = ["movie", "show", "book", "character", "favorite"]
1014
+ if any(word in message_lower for word in favorite_words):
1015
+ return self._generate_favorites_response(profile)
1016
+
1017
+ # General help
1018
+ if "help" in message_lower:
1019
+ return self._generate_help_response()
1020
+
1021
+ # Default response
1022
+ return ("I'm your personalized teaching assistant. I can help with study tips, "
1023
+ "grade information, course advice, and more. Try asking about how to "
1024
+ "study effectively or about your course history.")
1025
+
1026
+ def _generate_study_advice(self, profile: Dict) -> str:
1027
+ """Generate study advice based on learning style."""
1028
+ learning_style = profile.get("learning_style", "")
1029
+ response = ""
1030
+
1031
+ if "Visual" in learning_style:
1032
+ response = ("Based on your visual learning style, I recommend:\n"
1033
+ "- Creating colorful mind maps or diagrams\n"
1034
+ "- Using highlighters to color-code your notes\n"
1035
+ "- Watching educational videos on the topics\n"
1036
+ "- Creating flashcards with images\n\n")
1037
+ elif "Auditory" in learning_style:
1038
+ response = ("Based on your auditory learning style, I recommend:\n"
1039
+ "- Recording your notes and listening to them\n"
1040
+ "- Participating in study groups to discuss concepts\n"
1041
+ "- Explaining the material out loud to yourself\n"
1042
+ "- Finding podcasts or audio lectures on the topics\n\n")
1043
+ elif "Reading/Writing" in learning_style:
1044
+ response = ("Based on your reading/writing learning style, I recommend:\n"
1045
+ "- Writing detailed summaries in your own words\n"
1046
+ "- Creating organized outlines of the material\n"
1047
+ "- Reading additional textbooks or articles\n"
1048
+ "- Rewriting your notes to reinforce learning\n\n")
1049
+ elif "Kinesthetic" in learning_style:
1050
+ response = ("Based on your kinesthetic learning style, I recommend:\n"
1051
+ "- Creating physical models or demonstrations\n"
1052
+ "- Using hands-on activities to learn concepts\n"
1053
+ "- Taking frequent movement breaks while studying\n"
1054
+ "- Associating information with physical actions\n\n")
1055
+ else:
1056
+ response = ("Here are some general study tips:\n"
1057
+ "- Use the Pomodoro technique (25 min study, 5 min break)\n"
1058
+ "- Space out your study sessions over time\n"
1059
+ "- Test yourself with practice questions\n"
1060
+ "- Teach the material to someone else\n\n")
1061
+
1062
+ # Add time management advice
1063
+ response += ("**Time Management Tips**:\n"
1064
+ "- Create a study schedule and stick to it\n"
1065
+ "- Prioritize difficult subjects when you're most alert\n"
1066
+ "- Break large tasks into smaller, manageable chunks\n"
1067
+ "- Set specific goals for each study session")
1068
+
1069
+ return response
1070
+
1071
+ def _generate_grade_advice(self, profile: Dict) -> str:
1072
+ """Generate response about grades and GPA."""
1073
+ gpa = profile.get("transcript", {}).get("gpa", {})
1074
+ courses = profile.get("transcript", {}).get("courses", {})
1075
+
1076
+ response = (f"Your GPA information:\n"
1077
+ f"- Unweighted: {gpa.get('unweighted', 'N/A')}\n"
1078
+ f"- Weighted: {gpa.get('weighted', 'N/A')}\n\n")
1079
+
1080
+ # Identify any failing grades
1081
+ weak_subjects = []
1082
+ for grade_level, course_list in courses.items():
1083
+ for course in course_list:
1084
+ if course.get('grade', '').upper() in ['D', 'F']:
1085
+ weak_subjects.append(f"{course.get('code', '')} {course.get('name', 'Unknown course')}")
1086
+
1087
+ if weak_subjects:
1088
+ response += ("**Areas for Improvement**:\n"
1089
+ f"You might want to focus on these subjects: {', '.join(weak_subjects)}\n\n")
1090
+
1091
+ response += ("**Grade Improvement Strategies**:\n"
1092
+ "- Meet with your teachers to discuss your performance\n"
1093
+ "- Identify specific areas where you lost points\n"
1094
+ "- Create a targeted study plan for weak areas\n"
1095
+ "- Practice with past exams or sample questions")
1096
+
1097
+ return response
1098
+
1099
+ def _generate_interest_advice(self, profile: Dict) -> str:
1100
+ """Generate response based on student interests."""
1101
+ interests = profile.get("interests", "")
1102
+ response = f"I see you're interested in: {interests}\n\n"
1103
+
1104
+ response += ("**Suggestions**:\n"
1105
+ "- Look for clubs or extracurricular activities related to these interests\n"
1106
+ "- Explore career paths that align with these interests\n"
1107
+ "- Find online communities or forums about these topics\n"
1108
+ "- Consider projects or independent study in these areas")
1109
+
1110
+ return response
1111
+
1112
+ def _generate_course_advice(self, profile: Dict) -> str:
1113
+ """Generate response about courses."""
1114
+ courses = profile.get("transcript", {}).get("courses", {})
1115
+ grade_level = profile.get("transcript", {}).get("grade_level", "unknown")
1116
+
1117
+ response = "Here's a summary of your courses:\n"
1118
+ for grade in sorted(courses.keys(), key=lambda x: int(x) if x.isdigit() else x):
1119
+ response += f"\n**Grade {grade}**:\n"
1120
+ for course in courses[grade]:
1121
+ response += f"- {course.get('code', '')} {course.get('name', 'Unnamed course')}"
1122
+ if 'grade' in course:
1123
+ response += f" (Grade: {course['grade']})"
1124
+ response += "\n"
1125
+
1126
+ response += f"\nAs a grade {grade_level} student, you might want to:\n"
1127
+ if grade_level in ["9", "10"]:
1128
+ response += ("- Focus on building strong foundational skills\n"
1129
+ "- Explore different subjects to find your interests\n"
1130
+ "- Start thinking about college/career requirements")
1131
+ elif grade_level in ["11", "12"]:
1132
+ response += ("- Focus on courses relevant to your college/career goals\n"
1133
+ "- Consider taking AP or advanced courses if available\n"
1134
+ "- Ensure you're meeting graduation requirements")
1135
+
1136
+ return response
1137
+
1138
+ def _generate_favorites_response(self, profile: Dict) -> str:
1139
+ """Generate response about favorite items."""
1140
+ favorites = profile.get("favorites", {})
1141
+ response = "I see you enjoy:\n"
1142
+
1143
+ if favorites.get('movie'):
1144
+ response += f"- Movie: {favorites['movie']} ({favorites.get('movie_reason', 'no reason provided')})\n"
1145
+ if favorites.get('show'):
1146
+ response += f"- TV Show: {favorites['show']} ({favorites.get('show_reason', 'no reason provided')})\n"
1147
+ if favorites.get('book'):
1148
+ response += f"- Book: {favorites['book']} ({favorites.get('book_reason', 'no reason provided')})\n"
1149
+ if favorites.get('character'):
1150
+ response += f"- Character: {favorites['character']} ({favorites.get('character_reason', 'no reason provided')})\n"
1151
+
1152
+ response += "\nThese preferences suggest you might enjoy:\n"
1153
+ response += "- Similar books/movies in the same genre\n"
1154
+ response += "- Creative projects related to these stories\n"
1155
+ response += "- Analyzing themes or characters in your schoolwork"
1156
+
1157
+ return response
1158
+
1159
+ def _generate_help_response(self) -> str:
1160
+ """Generate help response with available commands."""
1161
+ return ("""I can help with:
1162
+ - **Study tips**: "How should I study for math?"
1163
+ - **Grade information**: "What's my GPA?"
1164
+ - **Course advice**: "Show me my course history"
1165
+ - **Interest suggestions**: "What clubs match my interests?"
1166
+ - **General advice**: "How can I improve my grades?"
1167
+ Try asking about any of these topics!""")
1168
 
1169
  # Initialize teaching assistant
1170
  teaching_assistant = TeachingAssistant()
1171
 
1172
  # ========== GRADIO INTERFACE ==========
1173
  def create_interface():
1174
+ with gr.Blocks(theme=gr.themes.Soft(), title="Student Learning Assistant") as app:
1175
+ # Session state
1176
+ session_token = gr.State(value=generate_session_token())
1177
+ profile_manager.set_session(session_token.value)
1178
+
1179
+ # Track completion status for each tab
1180
+ tab_completed = gr.State({
1181
+ 0: False, # Transcript Upload
1182
+ 1: False, # Learning Style Quiz
1183
+ 2: False, # Personal Questions
1184
+ 3: False, # Save & Review
1185
+ 4: False # AI Assistant
1186
+ })
1187
+
1188
+ # Custom CSS for better styling
1189
+ app.css = """
1190
+ .gradio-container {
1191
+ max-width: 1200px !important;
1192
+ margin: 0 auto;
1193
+ }
1194
+ .tab {
1195
+ padding: 20px;
1196
+ border-radius: 8px;
1197
+ background: white;
1198
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
1199
+ }
1200
+ .progress-bar {
1201
+ height: 5px;
1202
+ background: linear-gradient(to right, #4CAF50, #8BC34A);
1203
+ margin-bottom: 15px;
1204
+ border-radius: 3px;
1205
+ }
1206
+ .quiz-question {
1207
+ margin-bottom: 15px;
1208
+ padding: 15px;
1209
+ background: #f5f5f5;
1210
+ border-radius: 5px;
1211
+ }
1212
+ .profile-card {
1213
+ border: 1px solid #e0e0e0;
1214
+ border-radius: 8px;
1215
+ padding: 15px;
1216
+ margin-bottom: 15px;
1217
+ background: white;
1218
+ }
1219
+ .chatbot {
1220
+ min-height: 500px;
1221
+ }
1222
+ .completed-tab {
1223
+ background: #2196F3 !important;
1224
+ color: white !important;
1225
+ }
1226
+ .incomplete-tab {
1227
+ background: #E0E0E0 !important;
1228
+ }
1229
+ .alert-box {
1230
+ padding: 15px;
1231
+ margin-bottom: 20px;
1232
+ border: 1px solid transparent;
1233
+ border-radius: 4px;
1234
+ color: #31708f;
1235
+ background-color: #d9edf7;
1236
+ border-color: #bce8f1;
1237
+ }
1238
+ .nav-message {
1239
+ padding: 10px;
1240
+ margin: 10px 0;
1241
+ border-radius: 4px;
1242
+ background-color: #ffebee;
1243
+ color: #c62828;
1244
+ }
1245
+ .model-loading {
1246
+ padding: 15px;
1247
+ margin: 15px 0;
1248
+ border-radius: 4px;
1249
+ background-color: #fff3e0;
1250
+ color: #e65100;
1251
+ }
1252
+ .model-selection {
1253
+ margin-bottom: 20px;
1254
+ padding: 15px;
1255
+ background: #f8f9fa;
1256
+ border-radius: 8px;
1257
+ }
1258
+ """
1259
+
1260
+ gr.Markdown("""
1261
+ # Student Learning Assistant
1262
+ **Your personalized education companion**
1263
+ Complete each step to get customized learning recommendations.
1264
+ """)
1265
+
1266
+ # Model selection section
1267
+ with gr.Group(elem_classes="model-selection"):
1268
+ model_selector = gr.Dropdown(
1269
+ choices=list(MODEL_CHOICES.keys()),
1270
+ value=DEFAULT_MODEL,
1271
+ label="Select AI Model",
1272
+ interactive=True
1273
+ )
1274
+ load_model_btn = gr.Button("Load Selected Model", variant="secondary")
1275
+ model_status = gr.HTML(
1276
+ value="<div class='model-loading'>Model not loaded yet. Please select and load a model.</div>",
1277
+ visible=True
1278
+ )
1279
+
1280
+ # Progress tracker
1281
+ with gr.Row():
1282
+ with gr.Column(scale=1):
1283
+ step1 = gr.Button("1. Upload Transcript", elem_classes="incomplete-tab")
1284
+ with gr.Column(scale=1):
1285
+ step2 = gr.Button("2. Learning Style Quiz", elem_classes="incomplete-tab", interactive=False)
1286
+ with gr.Column(scale=1):
1287
+ step3 = gr.Button("3. Personal Questions", elem_classes="incomplete-tab", interactive=False)
1288
+ with gr.Column(scale=1):
1289
+ step4 = gr.Button("4. Save & Review", elem_classes="incomplete-tab", interactive=False)
1290
+ with gr.Column(scale=1):
1291
+ step5 = gr.Button("5. AI Assistant", elem_classes="incomplete-tab", interactive=False)
1292
+
1293
+ # Alert box for quiz submission
1294
+ quiz_alert = gr.HTML(visible=False)
1295
+
1296
+ # Navigation message
1297
+ nav_message = gr.HTML(elem_classes="nav-message", visible=False)
1298
 
1299
+ # Main tabs
1300
+ with gr.Tabs() as tabs:
1301
+ # ===== TAB 1: Transcript Upload =====
1302
+ with gr.Tab("Transcript Upload", id=0) as tab1:
1303
  with gr.Row():
1304
  with gr.Column(scale=1):
1305
+ gr.Markdown("### Step 1: Upload Your Transcript")
1306
+ gr.Markdown("Upload a PDF or image of your academic transcript to analyze your courses and GPA.")
 
 
 
 
 
 
1307
 
1308
+ with gr.Group():
1309
+ transcript_file = gr.File(
1310
+ label="Transcript (PDF or Image)",
1311
+ file_types=ALLOWED_FILE_TYPES,
1312
+ type="filepath"
1313
+ )
1314
+ upload_btn = gr.Button("Upload & Analyze", variant="primary")
1315
 
1316
+ gr.Markdown("""
1317
+ **Supported Formats**: PDF, PNG, JPG
1318
+ **Note**: Your file is processed locally and not stored permanently.
1319
+ """)
1320
+
1321
+ with gr.Column(scale=2):
1322
+ transcript_output = gr.Textbox(
1323
+ label="Transcript Analysis",
1324
+ lines=20,
1325
+ interactive=False
1326
+ )
1327
+ transcript_data = gr.State()
1328
+
1329
+ def process_transcript_and_update(file_obj, current_tab_status, progress=gr.Progress()):
1330
+ output_text, data = parse_transcript(file_obj, progress)
1331
+ if "Error" not in output_text:
1332
+ new_status = current_tab_status.copy()
1333
+ new_status[0] = True
1334
+ return output_text, data, new_status, \
1335
+ gr.update(elem_classes="completed-tab"), \
1336
+ gr.update(interactive=True), \
1337
+ gr.update(visible=False)
1338
+ return output_text, data, current_tab_status, \
1339
+ gr.update(), gr.update(), gr.update()
1340
+
1341
+ upload_btn.click(
1342
+ fn=process_transcript_and_update,
1343
+ inputs=[transcript_file, tab_completed],
1344
+ outputs=[transcript_output, transcript_data, tab_completed, step1, step2, nav_message]
1345
+ )
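# Both branches of process_transcript_and_update return six values that line up
# positionally with the outputs list above: transcript_output, transcript_data,
# tab_completed, step1 (class swap), step2 (unlock), and nav_message (hide).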
1346
+
1347
+ # ===== TAB 2: Learning Style Quiz =====
1348
+ with gr.Tab("Learning Style Quiz", id=1) as tab2:
1349
+ with gr.Row():
1350
  with gr.Column(scale=1):
1351
+ gr.Markdown("### Step 2: Discover Your Learning Style")
1352
+ gr.Markdown("Complete this 20-question quiz to identify whether you're a visual, auditory, reading/writing, or kinesthetic learner.")
 
 
 
 
 
 
 
 
1353
 
1354
+ progress = gr.HTML("<div class='progress-bar' style='width: 0%'></div>")
1355
+ quiz_submit = gr.Button("Submit Quiz", variant="primary")
1356
+
1357
+ with gr.Column(scale=2):
1358
+ quiz_components = []
1359
+ with gr.Accordion("Quiz Questions", open=True):
1360
+ for i, (question, options) in enumerate(zip(learning_style_quiz.questions, learning_style_quiz.options)):
1361
+ with gr.Group(elem_classes="quiz-question"):
1362
+ q = gr.Radio(
1363
+ options,
1364
+ label=f"{i+1}. {question}",
1365
+ show_label=True
1366
+ )
1367
+ quiz_components.append(q)
1368
 
1369
+ learning_output = gr.Markdown(
1370
+ label="Your Learning Style Results",
1371
+ visible=False
1372
+ )
1373
+
1374
+ # Update progress bar as questions are answered
1375
+ for component in quiz_components:
1376
+ component.change(
1377
+ fn=lambda *answers: {
1378
+ progress: gr.HTML(
1379
+ f"<div class='progress-bar' style='width: {sum(1 for a in answers if a)/len(answers)*100}%'></div>"
1380
+ )
1381
+ },
1382
+ inputs=quiz_components,
1383
+ outputs=progress
1384
+ )
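# The lambda above returns a {component: value} dict keyed by the progress HTML block.
# An equivalent, more explicit handler (a sketch, not a required change) could read:
def update_quiz_progress(*answers):
    answered = sum(1 for a in answers if a)  # unanswered radios arrive as None
    pct = answered / len(answers) * 100 if answers else 0
    return gr.HTML(f"<div class='progress-bar' style='width: {pct}%'></div>")
# wired as: component.change(update_quiz_progress, inputs=quiz_components, outputs=progress)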
1385
+
1386
+ def submit_quiz_and_update(*args):
1387
+ # The first argument is the tab_completed state, followed by answers
1388
+ current_tab_status = args[0]
1389
+ answers = args[1:]
1390
+
1391
+ result = learning_style_quiz.evaluate_quiz(*answers)
1392
+
1393
+ new_status = current_tab_status.copy()
1394
+ new_status[1] = True
1395
+
1396
+ return gr.update(value=result, visible=True), \
1398
+ new_status, \
1399
+ gr.update(elem_classes="completed-tab"), \
1400
+ gr.update(interactive=True), \
1401
+ gr.update(value="<div class='alert-box'>Quiz submitted successfully! Scroll down to view your results.</div>", visible=True), \
1402
+ gr.update(visible=False)
1403
+
1404
+ quiz_submit.click(
1405
+ fn=submit_quiz_and_update,
1406
+ inputs=[tab_completed] + quiz_components,
1407
+ outputs=[learning_output, tab_completed, step2, step3, quiz_alert, nav_message]
1408
+ )
1409
 
1410
+ # ===== TAB 3: Personal Questions =====
1411
+ with gr.Tab("Personal Profile", id=2) as tab3:
1412
  with gr.Row():
1413
  with gr.Column(scale=1):
1414
+ gr.Markdown("### Step 3: Tell Us About Yourself")
1415
+ gr.Markdown("This information helps us provide personalized recommendations.")
1416
+
1417
+ with gr.Group():
1418
+ name = gr.Textbox(label="Full Name", placeholder="Your name")
1419
+ age = gr.Number(label="Age", minimum=MIN_AGE, maximum=MAX_AGE, precision=0)
1420
+ interests = gr.Textbox(
1421
+ label="Your Interests/Hobbies",
1422
+ placeholder="e.g., Science, Music, Sports, Art..."
1423
+ )
1424
+
1425
+ save_personal_btn = gr.Button("Save Information", variant="primary")
1426
+ save_confirmation = gr.HTML(visible=False)
1427
+
1428
+ gr.Markdown("### Favorites")
1429
+ with gr.Group():
1430
+ movie = gr.Textbox(label="Favorite Movie")
1431
+ movie_reason = gr.Textbox(label="Why do you like it?", lines=2)
1432
+ show = gr.Textbox(label="Favorite TV Show")
1433
+ show_reason = gr.Textbox(label="Why do you like it?", lines=2)
1434
+ book = gr.Textbox(label="Favorite Book")
1435
+ book_reason = gr.Textbox(label="Why do you like it?", lines=2)
1436
+ character = gr.Textbox(label="Favorite Character (from any story)")
1437
+ character_reason = gr.Textbox(label="Why do you like them?", lines=2)
1438
 
1439
  with gr.Column(scale=1):
1440
+ gr.Markdown("### Additional Information")
1441
+
1442
+ blog_checkbox = gr.Checkbox(
1443
+ label="Would you like to write a short blog about your learning experiences?",
1444
+ value=False
1445
+ )
1446
+ blog_text = gr.Textbox(
1447
+ label="Your Learning Blog",
1448
+ placeholder="Write about your learning journey, challenges, goals...",
1449
+ lines=8,
1450
+ visible=False
1451
+ )
1452
+ blog_checkbox.change(
1453
+ lambda x: gr.update(visible=x),
1454
+ inputs=blog_checkbox,
1455
+ outputs=blog_text
1456
+ )
1457
+
1458
+ def save_personal_info(name, age, interests, current_tab_status):
1459
+ if name.strip() and age and interests.strip():
1460
+ new_status = current_tab_status.copy()
1461
+ new_status[2] = True
1462
+ return new_status, gr.update(elem_classes="completed-tab"), gr.update(interactive=True), gr.update(value="<div class='alert-box'>Information saved!</div>", visible=True), gr.update(visible=False)
1463
+ return current_tab_status, gr.update(), gr.update(), gr.update(visible=False), gr.update(visible=True)
1464
 
1465
+ save_personal_btn.click(
1466
+ fn=save_personal_info,
1467
+ inputs=[name, age, interests, tab_completed],
1468
+ outputs=[tab_completed, step3, step4, save_confirmation, nav_message]
1469
+ )
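# save_personal_info treats name, age, and interests as required. A slightly stricter
# check (sketch only) that also guards against a None age from gr.Number:
def is_personal_info_complete(name, age, interests):
    return bool(name and name.strip()) and age is not None and bool(interests and interests.strip())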
1470
+
1471
+ # ===== TAB 4: Save & Review =====
1472
+ with gr.Tab("Save Profile", id=3) as tab4:
1473
  with gr.Row():
1474
+ with gr.Column(scale=1):
1475
+ gr.Markdown("### Step 4: Review & Save Your Profile")
1476
+ gr.Markdown("Verify your information before saving. You can return to previous steps to make changes.")
1477
+
1478
+ save_btn = gr.Button("Save Profile", variant="primary")
1479
+
1480
+ # Profile management section
1481
+ with gr.Group():
1482
+ load_profile_dropdown = gr.Dropdown(
1483
+ label="Load Existing Profile",
1484
+ choices=profile_manager.list_profiles(session_token.value),
1485
+ visible=bool(profile_manager.list_profiles(session_token.value))
1486
+ )
1487
+ with gr.Row():
1488
+ load_btn = gr.Button("Load", visible=bool(profile_manager.list_profiles(session_token.value)))
1489
+ delete_btn = gr.Button("Delete", variant="stop", visible=bool(profile_manager.list_profiles(session_token.value)))
1490
+
1491
+ clear_btn = gr.Button("Clear Form")
1492
 
1493
+ with gr.Column(scale=2):
1494
+ output_summary = gr.Markdown(
1495
+ "Your profile summary will appear here after saving.",
1496
+ label="Profile Summary"
 
 
1497
  )
1498
+
1499
+ # Save profile
1500
+ def save_profile_and_update(*args):
1501
+ # Extract inputs
1502
+ inputs = args[:-1] # All except the last which is tab_completed
1503
+ current_tab_status = args[-1]
1504
+
1505
+ # Call the original save function
1506
+ summary = profile_manager.save_profile(*inputs)
1507
+
1508
+ # Update completion status
1509
+ new_status = current_tab_status.copy()
1510
+ new_status[3] = True
1511
+
1512
+ return summary, new_status, gr.update(elem_classes="completed-tab"), gr.update(interactive=True), gr.update(visible=False)
1513
+
1514
+ save_btn.click(
1515
+ fn=save_profile_and_update,
1516
+ inputs=[
1517
+ name, age, interests, transcript_data, learning_output,
1518
+ movie, movie_reason, show, show_reason,
1519
+ book, book_reason, character, character_reason, blog_text,
1520
+ tab_completed
1521
+ ],
1522
+ outputs=[output_summary, tab_completed, step4, step5, nav_message]
1523
+ ).then(
1524
+ fn=lambda: gr.update(choices=profile_manager.list_profiles(session_token.value), visible=True),
1525
+ outputs=load_profile_dropdown
1526
+ ).then(
1527
+ fn=lambda: gr.update(visible=True),
1528
+ outputs=load_btn
1529
+ ).then(
1530
+ fn=lambda: gr.update(visible=True),
1531
+ outputs=delete_btn
1532
+ )
1533
+
1534
+ # Load profile
1535
+ load_btn.click(
1536
+ fn=lambda name: profile_manager.load_profile(name, session_token.value),
1537
+ inputs=load_profile_dropdown,
1538
+ outputs=output_summary
1539
+ )
1540
+
1541
+ # Delete profile
1542
+ def delete_profile(name, session_token):
1543
+ if not name:
1544
+ raise gr.Error("Please select a profile to delete")
1545
+ try:
1546
+ profile_path = profile_manager.get_profile_path(name)
1547
+ if profile_path.exists():
1548
+ profile_path.unlink()
1549
+ return "Profile deleted successfully", ""
+ return "Profile not found", ""
1550
+ except Exception as e:
1551
+ raise gr.Error(f"Error deleting profile: {str(e)}")
1552
+
1553
+ delete_btn.click(
1554
+ fn=delete_profile,
1555
+ inputs=[load_profile_dropdown, session_token],
1556
+ outputs=[output_summary, load_profile_dropdown]
1557
+ ).then(
1558
+ fn=lambda: gr.update(
1559
+ choices=profile_manager.list_profiles(session_token.value),
1560
+ visible=bool(profile_manager.list_profiles(session_token.value))
1561
+ ),
1562
+ outputs=load_profile_dropdown
1563
+ ).then(
1564
+ fn=lambda: gr.update(visible=bool(profile_manager.list_profiles(session_token.value))),
1565
+ outputs=load_btn
1566
+ ).then(
1567
+ fn=lambda: gr.update(visible=bool(profile_manager.list_profiles(session_token.value))),
1568
+ outputs=delete_btn
1569
+ )
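# The refresh lambdas above read session_token.value, i.e. the State's initial value.
# If per-session isolation is wanted, the token can be passed in as an event input
# instead (a sketch, assuming profile_manager.list_profiles(token) keeps its signature):
def refresh_profile_list(token):
    choices = profile_manager.list_profiles(token)
    return gr.update(choices=choices, visible=bool(choices))
# e.g. delete_btn.click(...).then(refresh_profile_list, inputs=session_token, outputs=load_profile_dropdown)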
1570
+
1571
+ # Clear form
1572
+ clear_btn.click(
1573
+ fn=lambda: [gr.update(value="") for _ in range(12)],
1574
+ outputs=[
1575
+ name, age, interests,
1576
+ movie, movie_reason, show, show_reason,
1577
+ book, book_reason, character, character_reason,
1578
+ blog_text
1579
+ ]
1580
+ ).then(
1581
+ fn=lambda: gr.update(value=""),
1582
+ outputs=output_summary
1583
+ ).then(
1584
+ fn=lambda: gr.update(value=False),
1585
+ outputs=blog_checkbox
1586
+ ).then(
1587
+ fn=lambda: gr.update(visible=False),
1588
+ outputs=blog_text
1589
+ )
1590
+
1591
+ # ===== TAB 5: AI Teaching Assistant =====
1592
+ with gr.Tab("AI Assistant", id=4) as tab5:
1593
+ gr.Markdown("## Your Personalized Learning Assistant")
1594
+ gr.Markdown("Ask me anything about studying, your courses, grades, or learning strategies.")
1595
+
1596
+ # Chat interface with session token
1597
+ chatbot = gr.ChatInterface(
1598
+ fn=lambda msg, hist: teaching_assistant.generate_response(msg, hist, session_token.value),
1599
+ examples=[
1600
+ "How should I study for my next math test?",
1601
+ "What's my current GPA?",
1602
+ "Show me my course history",
1603
+ "How can I improve my grades in science?",
1604
+ "What study methods match my learning style?"
1605
+ ],
1606
+ title=""
1607
+ )
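# gr.ChatInterface calls fn(message, history); the lambda above forwards both along
# with the session token. An equivalent named wrapper (sketch) reads:
def assistant_fn(message, history):
    return teaching_assistant.generate_response(message, history, session_token.value)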
1608
 
1609
+ # Tab navigation logic with completion check
1610
+ def navigate_to_tab(tab_index: int, tab_completed_status):
1611
+ # Always allow going back to previous tabs
1612
+ current_tab = tabs.selected
1613
+ if current_tab is not None and tab_index > current_tab:
1614
+ # Check if current tab is completed
1615
+ if not tab_completed_status.get(current_tab, False):
1616
+ return gr.Tabs(selected=current_tab), \
1617
+ gr.update(value=f"<div class='nav-message'>Please complete the current tab before proceeding to tab {tab_index + 1}</div>", visible=True), \
1618
+ gr.update(visible=False)
1619
+
1620
+ return gr.Tabs(selected=tab_index), gr.update(visible=False), gr.update(visible=False)
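# Note: gr.Tabs.selected is set at construction time, so it may not reflect the tab the
# user is actually viewing when this callback fires. A variant that gates purely on the
# completion dict (a sketch under that assumption) would be:
def navigate_to_tab_strict(tab_index: int, tab_completed_status):
    # allow jumping to tab N only once every earlier tab is marked complete
    if all(tab_completed_status.get(i, False) for i in range(tab_index)):
        return gr.Tabs(selected=tab_index), gr.update(visible=False), gr.update(visible=False)
    return gr.update(), \
        gr.update(value="<div class='nav-message'>Please complete the earlier steps first.</div>", visible=True), \
        gr.update(visible=False)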
1621
 
1622
+ step1.click(
1623
+ fn=lambda idx, status: navigate_to_tab(idx, status),
1624
+ inputs=[gr.State(0), tab_completed],
1625
+ outputs=[tabs, nav_message, quiz_alert]
1626
  )
1627
+ step2.click(
1628
+ fn=lambda idx, status: navigate_to_tab(idx, status),
1629
+ inputs=[gr.State(1), tab_completed],
1630
+ outputs=[tabs, nav_message, quiz_alert]
1631
  )
1632
+ step3.click(
1633
+ fn=lambda idx, status: navigate_to_tab(idx, status),
1634
+ inputs=[gr.State(2), tab_completed],
1635
+ outputs=[tabs, nav_message, quiz_alert]
1636
  )
1637
+ step4.click(
1638
+ fn=lambda idx, status: navigate_to_tab(idx, status),
1639
+ inputs=[gr.State(3), tab_completed],
1640
+ outputs=[tabs, nav_message, quiz_alert]
1641
  )
1642
+ step5.click(
1643
+ fn=lambda idx, status: navigate_to_tab(idx, status),
1644
+ inputs=[gr.State(4), tab_completed],
1645
+ outputs=[tabs, nav_message, quiz_alert]
 
1646
  )
1647
 
1648
+ # Model loading functions
1649
+ def load_selected_model(model_name, progress=gr.Progress()):
1650
+ try:
1651
+ model_loader.load_model(model_name, progress)
1652
+ if model_loader.loaded:
1653
+ return gr.update(value=f"<div class='alert-box'>{model_name} loaded successfully!</div>", visible=True)
1654
+ else:
1655
+ return gr.update(value=f"<div class='nav-message'>Failed to load model: {model_loader.error}</div>", visible=True)
1656
+ except Exception as e:
1657
+ return gr.update(value=f"<div class='nav-message'>Error: {str(e)}</div>", visible=True)
1658
 
1659
+ load_model_btn.click(
1660
+ fn=load_selected_model,
1661
+ inputs=model_selector,
1662
+ outputs=model_status
1663
  )
1664
+
1665
+ return app
1666
 
1667
  # Create the interface
1668
  app = create_interface()
 
1670
  # For Hugging Face Spaces deployment
1671
  if __name__ == "__main__":
1672
  app.launch()
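# launch() with no arguments is sufficient on Spaces; enabling the request queue is an
# optional tweak (sketch) when model loading or generation is slow:
#
#     app.queue().launch()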
 
1673