Dannyar608 committed
Commit efc52d4 (verified)
Parent(s): 5b7059f

Update app.py

Files changed (1)
  1. app.py +86 -245
app.py CHANGED
@@ -8,7 +8,7 @@ from collections import defaultdict
 from typing import Dict, List, Optional, Tuple, Union
 import html
 from pathlib import Path
-import fitz  # PyMuPDF for better PDF text extraction
+import fitz  # PyMuPDF
 import pytesseract
 from PIL import Image
 import io
@@ -48,7 +48,7 @@ if HF_TOKEN:
    except Exception as e:
        logging.error(f"Failed to initialize Hugging Face API: {str(e)}")

-# ========== OPTIMIZED MODEL LOADING ==========
+# ========== MODEL LOADER ==========
 class ModelLoader:
    def __init__(self):
        self.model = None
@@ -85,10 +85,6 @@ class ModelLoader:
            "low_cpu_mem_usage": True
        }

-        # Add quantization config for low-memory devices
-        if self.device == "cpu":
-            model_kwargs["load_in_8bit"] = True
-
        if progress:
            progress(0.3, desc="Loading tokenizer...")
        self.tokenizer = AutoTokenizer.from_pretrained(
@@ -396,10 +392,6 @@ class TranscriptParser:
            "completion_status": self._calculate_completion()
        }, indent=2)

-async def parse_transcript_async(file_obj, progress=gr.Progress()):
-    """Async wrapper for transcript parsing"""
-    return await asyncio.to_thread(parse_transcript, file_obj, progress)
-
 def parse_transcript_with_ai(text: str, progress=gr.Progress()) -> Dict:
    """Use AI model to parse transcript text with progress feedback"""
    model, tokenizer = model_loader.load_model(progress)
@@ -438,7 +430,7 @@ def parse_transcript_with_ai(text: str, progress=gr.Progress()) -> Dict:

        if progress:
            progress(1.0)
-        return validate_parsed_data(formatted_data)
+        return formatted_data

    except Exception as e:
        logging.warning(f"Structured parsing failed, falling back to AI: {str(e)}")
@@ -473,11 +465,11 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict
            progress(0.1, desc="Processing transcript with AI...")

        # Tokenize and generate response
-        inputs = model_loader.tokenizer(prompt, return_tensors="pt").to(model_loader.device)
+        inputs = tokenizer(prompt, return_tensors="pt").to(model_loader.device)
        if progress:
            progress(0.4)

-        outputs = model_loader.model.generate(
+        outputs = model.generate(
            **inputs,
            max_new_tokens=1500,
            temperature=0.1,
@@ -487,7 +479,7 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict
            progress(0.8)

        # Decode the response
-        response = model_loader.tokenizer.decode(outputs[0], skip_special_tokens=True)
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Extract JSON from response
        try:
@@ -500,7 +492,7 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict

        if progress:
            progress(1.0)
-        return validate_parsed_data(parsed_data)
+        return parsed_data

    except torch.cuda.OutOfMemoryError:
        raise gr.Error("The model ran out of memory. Try with a smaller transcript.")
@@ -508,32 +500,6 @@ def parse_transcript_with_ai_fallback(text: str, progress=gr.Progress()) -> Dict
        logging.error(f"AI parsing error: {str(e)}")
        raise gr.Error(f"Error processing transcript: {str(e)}")

-def validate_parsed_data(data: Dict) -> Dict:
-    """Validate and clean the parsed data structure."""
-    if not isinstance(data, dict):
-        raise ValueError("Invalid data format")
-
-    # Set default structure if missing
-    if 'grade_level' not in data:
-        data['grade_level'] = 'Unknown'
-
-    if 'gpa' not in data:
-        data['gpa'] = {'weighted': 'N/A', 'unweighted': 'N/A'}
-
-    if 'courses' not in data:
-        data['courses'] = []
-
-    # Clean course data
-    for course in data['courses']:
-        if 'grade' in course:
-            course['grade'] = course['grade'].upper().strip()
-
-        # Ensure numeric credits are strings
-        if 'credits' in course and isinstance(course['credits'], (int, float)):
-            course['credits'] = str(course['credits'])
-
-    return data
-
 def format_transcript_output(data: Dict) -> str:
    """Format the parsed data into human-readable text."""
    output = []
@@ -1243,130 +1209,66 @@ def create_interface():
        4: False  # AI Assistant
    })

-    # Custom CSS for better styling
+    # Custom CSS
    app.css = """
-    .gradio-container {
-        max-width: 1200px !important;
-        margin: 0 auto;
-    }
-    .tab {
-        padding: 20px;
-        border-radius: 8px;
-        background: white;
-        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
-    }
-    .progress-bar {
-        height: 5px;
-        background: linear-gradient(to right, #4CAF50, #8BC34A);
-        margin-bottom: 15px;
-        border-radius: 3px;
-    }
-    .quiz-question {
-        margin-bottom: 15px;
-        padding: 15px;
-        background: #f5f5f5;
-        border-radius: 5px;
-    }
-    .profile-card {
-        border: 1px solid #e0e0e0;
-        border-radius: 8px;
-        padding: 15px;
-        margin-bottom: 15px;
-        background: white;
-    }
-    .chatbot {
-        min-height: 500px;
-    }
-    .completed-tab {
-        background: #2196F3 !important;
-        color: white !important;
-    }
-    .incomplete-tab {
-        background: #E0E0E0 !important;
-    }
-    .alert-box {
-        padding: 15px;
-        margin-bottom: 20px;
-        border: 1px solid transparent;
-        border-radius: 4px;
-        color: #31708f;
-        background-color: #d9edf7;
-        border-color: #bce8f1;
-    }
-    .nav-message {
-        padding: 10px;
-        margin: 10px 0;
-        border-radius: 4px;
-        background-color: #ffebee;
-        color: #c62828;
-    }
-    .model-loading {
-        padding: 15px;
-        margin: 15px 0;
-        border-radius: 4px;
-        background-color: #fff3e0;
-        color: #e65100;
-    }
+    .gradio-container { max-width: 1200px !important; margin: 0 auto !important; }
+    .tab-content { padding: 20px !important; border: 1px solid #e0e0e0 !important; border-radius: 8px !important; margin-top: 10px !important; }
+    .completed-tab { background: #4CAF50 !important; color: white !important; }
+    .incomplete-tab { background: #E0E0E0 !important; }
+    .nav-message { padding: 10px; margin: 10px 0; border-radius: 4px; background-color: #ffebee; color: #c62828; }
+    .file-upload { border: 2px dashed #4CAF50 !important; padding: 20px !important; border-radius: 8px !important; }
+    .progress-bar { height: 5px; background: linear-gradient(to right, #4CAF50, #8BC34A); margin-bottom: 15px; border-radius: 3px; }
+    .quiz-question { margin-bottom: 15px; padding: 15px; background: #f5f5f5; border-radius: 5px; }
    """

+    # Header
    gr.Markdown("""
    # Student Learning Assistant
    **Your personalized education companion**
    Complete each step to get customized learning recommendations.
    """)
-
-    # Progress tracker
+
+    # Navigation buttons
    with gr.Row():
-        with gr.Column(scale=1):
-            step1 = gr.Button("1. Upload Transcript", elem_classes="incomplete-tab")
-        with gr.Column(scale=1):
-            step2 = gr.Button("2. Learning Style Quiz", elem_classes="incomplete-tab", interactive=False)
-        with gr.Column(scale=1):
-            step3 = gr.Button("3. Personal Questions", elem_classes="incomplete-tab", interactive=False)
-        with gr.Column(scale=1):
-            step4 = gr.Button("4. Save & Review", elem_classes="incomplete-tab", interactive=False)
-        with gr.Column(scale=1):
-            step5 = gr.Button("5. AI Assistant", elem_classes="incomplete-tab", interactive=False)
-
-    # Alert box for quiz submission
-    quiz_alert = gr.HTML(visible=False)
-
-    # Navigation message
-    nav_message = gr.HTML(elem_classes="nav-message", visible=False)
-
-    # Main tabs (hidden since we're using the button navigation)
-    with gr.Tabs(visible=False) as tabs:
-        # ===== TAB 1: Transcript Upload =====
-        with gr.Tab("Transcript Upload", id=0) as tab1:
+        with gr.Column(scale=1, min_width=100):
+            step1 = gr.Button("1. Transcript", elem_classes="incomplete-tab")
+        with gr.Column(scale=1, min_width=100):
+            step2 = gr.Button("2. Quiz", elem_classes="incomplete-tab", interactive=False)
+        with gr.Column(scale=1, min_width=100):
+            step3 = gr.Button("3. Profile", elem_classes="incomplete-tab", interactive=False)
+        with gr.Column(scale=1, min_width=100):
+            step4 = gr.Button("4. Review", elem_classes="incomplete-tab", interactive=False)
+        with gr.Column(scale=1, min_width=100):
+            step5 = gr.Button("5. Assistant", elem_classes="incomplete-tab", interactive=False)
+
+    nav_message = gr.HTML(visible=False)
+
+    # Main tabs container - Now VISIBLE
+    with gr.Tabs(visible=True) as tabs:
+        # ===== TAB 1: TRANSCRIPT UPLOAD =====
+        with gr.Tab("Transcript", id=0):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Step 1: Upload Your Transcript")
-                    gr.Markdown("Upload a PDF or image of your academic transcript to analyze your courses and GPA.")
-
-                    with gr.Group():
-                        transcript_file = gr.File(
-                            label="Transcript (PDF or Image)",
+                    with gr.Group(elem_classes="file-upload"):
+                        file_input = gr.File(
+                            label="Drag and drop your transcript here (PDF or Image)",
                            file_types=ALLOWED_FILE_TYPES,
                            type="filepath"
                        )
-                    upload_btn = gr.Button("Upload & Analyze", variant="primary")
-
-                    gr.Markdown("""
-                    **Supported Formats**: PDF, PNG, JPG
-                    **Note**: Your file is processed locally and not stored permanently.
-                    """)
+                    upload_btn = gr.Button("Analyze Transcript", variant="primary")

                with gr.Column(scale=2):
                    transcript_output = gr.Textbox(
-                        label="Transcript Analysis",
+                        label="Analysis Results",
                        lines=20,
                        interactive=False
                    )
                    transcript_data = gr.State()
-
-            def process_transcript_and_update(file_obj, current_tab_status, progress=gr.Progress()):
+
+            def process_transcript(file_obj, current_tab_status):
                try:
-                    output_text, data = parse_transcript(file_obj, progress)
+                    output_text, data = parse_transcript(file_obj)
                    if "Error" not in output_text:
                        new_status = current_tab_status.copy()
                        new_status[0] = True
@@ -1379,9 +1281,8 @@ def create_interface():
                        gr.update(visible=False)
                    )
                except Exception as e:
-                    logging.error(f"Upload error: {str(e)}")
                    return (
-                        f"Error processing transcript: {str(e)}",
+                        f"Error: {str(e)}",
                        None,
                        current_tab_status,
                        gr.update(),
@@ -1390,21 +1291,19 @@ def create_interface():
                    )

            upload_btn.click(
-                fn=process_transcript_and_update,
-                inputs=[transcript_file, tab_completed],
-                outputs=[transcript_output, transcript_data, tab_completed, step1, step2, nav_message],
-                concurrency_limit=1
+                process_transcript,
+                inputs=[file_input, tab_completed],
+                outputs=[transcript_output, transcript_data, tab_completed, step1, step2, nav_message]
            )
-
-        # ===== TAB 2: Learning Style Quiz =====
-        with gr.Tab("Learning Style Quiz", id=1) as tab2:
+
+        # ===== TAB 2: LEARNING STYLE QUIZ =====
+        with gr.Tab("Learning Style Quiz", id=1):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Step 2: Discover Your Learning Style")
-                    gr.Markdown("Complete this 20-question quiz to identify whether you're a visual, auditory, reading/writing, or kinesthetic learner.")
-
                    progress = gr.HTML("<div class='progress-bar' style='width: 0%'></div>")
                    quiz_submit = gr.Button("Submit Quiz", variant="primary")
+                    quiz_alert = gr.HTML(visible=False)

                with gr.Column(scale=2):
                    quiz_components = []
@@ -1422,7 +1321,7 @@ def create_interface():
                        label="Your Learning Style Results",
                        visible=False
                    )
-
+
                # Update progress bar as questions are answered
                for component in quiz_components:
                    component.change(
@@ -1436,7 +1335,6 @@ def create_interface():
                    )

                def submit_quiz_and_update(*args):
-                    # The first argument is the tab_completed state, followed by answers
                    current_tab_status = args[0]
                    answers = args[1:]

@@ -1450,11 +1348,10 @@ def create_interface():
                            new_status,
                            gr.update(elem_classes="completed-tab"),
                            gr.update(interactive=True),
-                            gr.update(value="<div class='alert-box'>Quiz submitted successfully! Scroll down to view your results.</div>", visible=True),
+                            gr.update(value="<div class='alert-box'>Quiz submitted successfully!</div>", visible=True),
                            gr.update(visible=False)
                        )
                    except Exception as e:
-                        logging.error(f"Quiz error: {str(e)}")
                        return (
                            f"Error evaluating quiz: {str(e)}",
                            gr.update(visible=True),
@@ -1470,14 +1367,12 @@ def create_interface():
                    inputs=[tab_completed] + quiz_components,
                    outputs=[learning_output, learning_output, tab_completed, step2, step3, quiz_alert, nav_message]
                )
-
-        # ===== TAB 3: Personal Questions =====
-        with gr.Tab("Personal Profile", id=2) as tab3:
+
+        # ===== TAB 3: PERSONAL QUESTIONS =====
+        with gr.Tab("Personal Profile", id=2):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Step 3: Tell Us About Yourself")
-                    gr.Markdown("This information helps us provide personalized recommendations.")
-
                    with gr.Group():
                        name = gr.Textbox(label="Full Name", placeholder="Your name")
                        age = gr.Number(label="Age", minimum=MIN_AGE, maximum=MAX_AGE, precision=0)
@@ -1488,7 +1383,8 @@ def create_interface():

                    save_personal_btn = gr.Button("Save Information", variant="primary")
                    save_confirmation = gr.HTML(visible=False)
-
+
+                with gr.Column(scale=1):
                    gr.Markdown("### Favorites")
                    with gr.Group():
                        movie = gr.Textbox(label="Favorite Movie")
@@ -1499,25 +1395,6 @@ def create_interface():
                        book_reason = gr.Textbox(label="Why do you like it?", lines=2)
                        character = gr.Textbox(label="Favorite Character (from any story)")
                        character_reason = gr.Textbox(label="Why do you like them?", lines=2)
-
-                with gr.Column(scale=1):
-                    gr.Markdown("### Additional Information")
-
-                    blog_checkbox = gr.Checkbox(
-                        label="Would you like to write a short blog about your learning experiences?",
-                        value=False
-                    )
-                    blog_text = gr.Textbox(
-                        label="Your Learning Blog",
-                        placeholder="Write about your learning journey, challenges, goals...",
-                        lines=8,
-                        visible=False
-                    )
-                    blog_checkbox.change(
-                        lambda x: gr.update(visible=x),
-                        inputs=blog_checkbox,
-                        outputs=blog_text
-                    )

            def save_personal_info(name, age, interests, current_tab_status):
                try:
@@ -1548,27 +1425,23 @@ def create_interface():
                inputs=[name, age, interests, tab_completed],
                outputs=[tab_completed, step3, step4, save_confirmation, nav_message]
            )
-
-        # ===== TAB 4: Save & Review =====
-        with gr.Tab("Save Profile", id=3) as tab4:
+
+        # ===== TAB 4: SAVE & REVIEW =====
+        with gr.Tab("Save Profile", id=3):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Step 4: Review & Save Your Profile")
-                    gr.Markdown("Verify your information before saving. You can return to previous steps to make changes.")
-
-                    save_btn = gr.Button("Save Profile", variant="primary")
-
-                    # Profile management section
                    with gr.Group():
                        load_profile_dropdown = gr.Dropdown(
                            label="Load Existing Profile",
                            choices=profile_manager.list_profiles(session_token.value),
-                            visible=bool(profile_manager.list_profiles(session_token.value))
+                            visible=False
                        )
                        with gr.Row():
-                            load_btn = gr.Button("Load", visible=bool(profile_manager.list_profiles(session_token.value)))
-                            delete_btn = gr.Button("Delete", variant="stop", visible=bool(profile_manager.list_profiles(session_token.value)))
+                            load_btn = gr.Button("Load", visible=False)
+                            delete_btn = gr.Button("Delete", variant="stop", visible=False)

+                    save_btn = gr.Button("Save Profile", variant="primary")
                    clear_btn = gr.Button("Clear Form")

                with gr.Column(scale=2):
@@ -1576,21 +1449,15 @@ def create_interface():
                        "Your profile summary will appear here after saving.",
                        label="Profile Summary"
                    )
-
-            # Save profile
+
            def save_profile_and_update(*args):
-                # Extract inputs
                inputs = args[:-1]  # All except the last which is tab_completed
                current_tab_status = args[-1]

                try:
-                    # Call the original save function
                    summary = profile_manager.save_profile(*inputs)
-
-                    # Update completion status
                    new_status = current_tab_status.copy()
                    new_status[3] = True
-
                    return (
                        summary,
                        new_status,
@@ -1599,7 +1466,6 @@ def create_interface():
                        gr.update(visible=False)
                    )
                except Exception as e:
-                    logging.error(f"Save profile error: {str(e)}")
                    return (
                        f"Error saving profile: {str(e)}",
                        current_tab_status,
@@ -1613,7 +1479,7 @@ def create_interface():
                inputs=[
                    name, age, interests, transcript_data, learning_output,
                    movie, movie_reason, show, show_reason,
-                    book, book_reason, character, character_reason, blog_text,
+                    book, book_reason, character, character_reason, "",
                    tab_completed
                ],
                outputs=[output_summary, tab_completed, step4, step5, nav_message]
@@ -1628,14 +1494,6 @@ def create_interface():
                outputs=delete_btn
            )

-            # Load profile
-            load_btn.click(
-                fn=lambda name: profile_manager.load_profile(name, session_token.value),
-                inputs=load_profile_dropdown,
-                outputs=output_summary
-            )
-
-            # Delete profile
            def delete_profile(name, session_token):
                if not name:
                    raise gr.Error("Please select a profile to delete")
@@ -1645,7 +1503,6 @@ def create_interface():
                    profile_path.unlink()
                    return "Profile deleted successfully", ""
                except Exception as e:
-                    logging.error(f"Delete profile error: {str(e)}")
                    raise gr.Error(f"Error deleting profile: {str(e)}")

            delete_btn.click(
@@ -1653,10 +1510,7 @@ def create_interface():
                inputs=[load_profile_dropdown, session_token],
                outputs=[output_summary, load_profile_dropdown]
            ).then(
-                fn=lambda: gr.update(
-                    choices=profile_manager.list_profiles(session_token.value),
-                    visible=bool(profile_manager.list_profiles(session_token.value))
-                ),
+                fn=lambda: profile_manager.list_profiles(session_token.value),
                outputs=load_profile_dropdown
            ).then(
                fn=lambda: gr.update(visible=bool(profile_manager.list_profiles(session_token.value))),
@@ -1666,32 +1520,21 @@ def create_interface():
                outputs=delete_btn
            )

-            # Clear form
            clear_btn.click(
                fn=lambda: [gr.update(value="") for _ in range(12)],
                outputs=[
                    name, age, interests,
                    movie, movie_reason, show, show_reason,
                    book, book_reason, character, character_reason,
-                    blog_text
+                    output_summary
                ]
-            ).then(
-                fn=lambda: gr.update(value=""),
-                outputs=output_summary
-            ).then(
-                fn=lambda: gr.update(value=False),
-                outputs=blog_checkbox
-            ).then(
-                fn=lambda: gr.update(visible=False),
-                outputs=blog_text
            )
-
-        # ===== TAB 5: AI Teaching Assistant =====
-        with gr.Tab("AI Assistant", id=4) as tab5:
+
+        # ===== TAB 5: AI ASSISTANT =====
+        with gr.Tab("AI Assistant", id=4):
            gr.Markdown("## Your Personalized Learning Assistant")
            gr.Markdown("Ask me anything about studying, your courses, grades, or learning strategies.")

-            # Chat interface with session token
            chatbot = gr.ChatInterface(
                fn=lambda msg, hist: teaching_assistant.generate_response(msg, hist, session_token.value),
                examples=[
@@ -1703,8 +1546,8 @@ def create_interface():
                ],
                title=""
            )
-
-    # Tab navigation logic with completion check
+
+    # Navigation logic
    def navigate_to_tab(tab_index: int, tab_completed_status):
        current_tab = tabs.selected

@@ -1716,48 +1559,46 @@ def create_interface():
        if not tab_completed_status.get(current_tab, False):
            return (
                gr.Tabs(selected=current_tab),
-                gr.update(value=f"⚠️ Complete Step {current_tab+1} first!", visible=True))
+                gr.update(value=f"⚠️ Complete Step {current_tab+1} first!", visible=True)
+            )

        return gr.Tabs(selected=tab_index), gr.update(visible=False)

+    # Connect navigation buttons
    step1.click(
-        fn=lambda idx, status: navigate_to_tab(idx, status),
+        lambda idx, status: navigate_to_tab(idx, status),
        inputs=[gr.State(0), tab_completed],
        outputs=[tabs, nav_message]
    )
    step2.click(
-        fn=lambda idx, status: navigate_to_tab(idx, status),
+        lambda idx, status: navigate_to_tab(idx, status),
        inputs=[gr.State(1), tab_completed],
        outputs=[tabs, nav_message]
    )
    step3.click(
-        fn=lambda idx, status: navigate_to_tab(idx, status),
+        lambda idx, status: navigate_to_tab(idx, status),
        inputs=[gr.State(2), tab_completed],
        outputs=[tabs, nav_message]
    )
    step4.click(
-        fn=lambda idx, status: navigate_to_tab(idx, status),
+        lambda idx, status: navigate_to_tab(idx, status),
        inputs=[gr.State(3), tab_completed],
        outputs=[tabs, nav_message]
    )
    step5.click(
-        fn=lambda idx, status: navigate_to_tab(idx, status),
+        lambda idx, status: navigate_to_tab(idx, status),
        inputs=[gr.State(4), tab_completed],
        outputs=[tabs, nav_message]
    )

-    # Load DeepSeek model automatically
-    app.load(
-        fn=lambda: model_loader.load_model(),
-        outputs=[]
-    )
+    # Load model on startup
+    app.load(fn=lambda: model_loader.load_model(), outputs=[])

    return app

-# Create the interface
+# Create and launch the interface
 app = create_interface()

-# For Hugging Face Spaces deployment
 if __name__ == "__main__":
    app.launch()