Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -16,7 +16,7 @@ import secrets
|
|
16 |
import string
|
17 |
from huggingface_hub import HfApi, HfFolder
|
18 |
import torch
|
19 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
20 |
import time
|
21 |
import logging
|
22 |
import asyncio
|
@@ -35,6 +35,7 @@ from dateutil.relativedelta import relativedelta
|
|
35 |
import numpy as np
|
36 |
import matplotlib.pyplot as plt
|
37 |
from tqdm import tqdm
|
|
|
38 |
|
39 |
# Enhanced Configuration
|
40 |
PROFILES_DIR = "student_profiles"
|
@@ -61,7 +62,7 @@ logging.basicConfig(
|
|
61 |
logger = logging.getLogger(__name__)
|
62 |
|
63 |
# Model configuration
|
64 |
-
MODEL_NAME = "
|
65 |
|
66 |
@lru_cache(maxsize=1)
|
67 |
def get_model_and_tokenizer():
|
@@ -1472,374 +1473,188 @@ class EnhancedProfileManager:
|
|
1472 |
# Initialize profile manager
|
1473 |
profile_manager = EnhancedProfileManager()
|
1474 |
|
1475 |
-
class
|
1476 |
def __init__(self):
|
1477 |
-
self.
|
1478 |
-
self.
|
1479 |
-
self.model
|
1480 |
-
self.
|
1481 |
-
|
1482 |
-
|
1483 |
-
|
1484 |
-
|
1485 |
-
|
1486 |
-
|
1487 |
-
|
1488 |
-
|
1489 |
-
|
1490 |
-
|
1491 |
-
|
1492 |
-
|
1493 |
-
|
1494 |
-
|
1495 |
-
|
1496 |
-
|
1497 |
-
|
1498 |
-
|
1499 |
-
|
1500 |
-
|
1501 |
-
|
1502 |
-
|
1503 |
-
|
1504 |
-
|
1505 |
-
|
1506 |
-
|
1507 |
-
|
1508 |
-
|
1509 |
-
|
1510 |
-
|
1511 |
-
|
1512 |
-
|
1513 |
-
|
1514 |
-
|
1515 |
-
|
1516 |
-
|
1517 |
-
|
1518 |
-
|
1519 |
-
|
1520 |
-
|
1521 |
-
|
1522 |
-
|
1523 |
-
|
1524 |
-
|
1525 |
-
|
1526 |
-
|
1527 |
-
|
1528 |
-
|
1529 |
-
|
1530 |
-
|
1531 |
-
|
1532 |
-
|
1533 |
-
return "I encountered an error processing your request. Please try again."
|
1534 |
-
|
1535 |
-
def _classify_query(self, message: str) -> str:
|
1536 |
-
message_lower = message.lower()
|
1537 |
-
|
1538 |
-
if any(word in message_lower for word in ['gpa', 'grade', 'average']):
|
1539 |
-
return 'gpa'
|
1540 |
-
elif any(word in message_lower for word in ['study', 'learn', 'exam', 'test']):
|
1541 |
-
return 'study'
|
1542 |
-
elif any(word in message_lower for word in ['course', 'class', 'subject']):
|
1543 |
-
return 'courses'
|
1544 |
-
elif any(word in message_lower for word in ['college', 'university', 'apply']):
|
1545 |
-
return 'college'
|
1546 |
-
elif any(word in message_lower for word in ['plan', 'schedule', 'calendar']):
|
1547 |
-
return 'planning'
|
1548 |
-
elif any(word in message_lower for word in ['resource', 'book', 'website']):
|
1549 |
-
return 'resources'
|
1550 |
-
else:
|
1551 |
-
return 'general'
|
1552 |
-
|
1553 |
-
async def _generate_typed_response(self, query_type: str, message: str, context: str, profile: Dict) -> str:
|
1554 |
-
if query_type == 'gpa':
|
1555 |
-
return self._generate_gpa_response(profile)
|
1556 |
-
elif query_type == 'study':
|
1557 |
-
return self._generate_study_response(profile)
|
1558 |
-
elif query_type == 'courses':
|
1559 |
-
return self._generate_courses_response(profile)
|
1560 |
-
elif query_type == 'college':
|
1561 |
-
return self._generate_college_response(profile)
|
1562 |
-
elif query_type == 'planning':
|
1563 |
-
return self._generate_planning_response(profile)
|
1564 |
-
elif query_type == 'resources':
|
1565 |
-
return self._generate_resources_response(profile)
|
1566 |
-
else:
|
1567 |
-
return await self._generate_general_response(message, context)
|
1568 |
-
|
1569 |
-
def _generate_gpa_response(self, profile: Dict) -> str:
|
1570 |
-
transcript = profile.get('transcript', {})
|
1571 |
-
analysis = academic_analyzer.analyze_gpa(transcript)
|
1572 |
-
response = [
|
1573 |
-
f"## 📊 GPA Analysis",
|
1574 |
-
f"**Rating:** {analysis['rating']}",
|
1575 |
-
f"{analysis['description']}",
|
1576 |
-
f"{analysis['comparison']}",
|
1577 |
-
"",
|
1578 |
-
f"## 🎓 Graduation Status"
|
1579 |
-
]
|
1580 |
-
|
1581 |
-
grad_status = academic_analyzer.analyze_graduation_status(transcript)
|
1582 |
-
response.append(grad_status['status'])
|
1583 |
-
response.append(f"**Completion:** {grad_status['completion_percentage']:.1f}%")
|
1584 |
-
|
1585 |
-
if grad_status.get('missing_requirements'):
|
1586 |
-
response.append("\n**Missing Requirements:**")
|
1587 |
-
for req in grad_status['missing_requirements'][:3]: # Show top 3 missing
|
1588 |
-
if transcript.get('format') == 'progress_summary':
|
1589 |
-
response.append(f"- {req['code']}: {req['description']} ({req['remaining']} credits remaining)")
|
1590 |
-
else:
|
1591 |
-
response.append(f"- {req['subject']}: {req['remaining']} credits remaining")
|
1592 |
-
|
1593 |
-
response.append("\n## 🏫 College Recommendations")
|
1594 |
-
college_recs = academic_analyzer.generate_college_recommendations(transcript)
|
1595 |
-
|
1596 |
-
if college_recs['reach']:
|
1597 |
-
response.append("\n**Reach Schools:**")
|
1598 |
-
response.extend([f"- {school}" for school in college_recs['reach'][:3]])
|
1599 |
-
|
1600 |
-
if college_recs['target']:
|
1601 |
-
response.append("\n**Target Schools:**")
|
1602 |
-
response.extend([f"- {school}" for school in college_recs['target'][:3]])
|
1603 |
-
|
1604 |
-
if analysis.get('improvement_tips'):
|
1605 |
-
response.append("\n**Improvement Tips:**")
|
1606 |
-
response.extend([f"- {tip}" for tip in analysis['improvement_tips']])
|
1607 |
-
|
1608 |
-
return "\n".join(response)
|
1609 |
-
|
1610 |
-
def _generate_study_response(self, profile: Dict) -> str:
|
1611 |
-
learning_style_match = re.search(r"Your primary learning style is\s*\*\*(.*?)\*\*",
|
1612 |
-
profile.get('learning_style', ''))
|
1613 |
-
if not learning_style_match:
|
1614 |
-
return "Please complete the learning style quiz first to get personalized study advice."
|
1615 |
-
|
1616 |
-
learning_style = learning_style_match.group(1)
|
1617 |
-
study_plan = profile.get('study_plan', {})
|
1618 |
-
|
1619 |
-
response = [
|
1620 |
-
f"As a **{learning_style}** learner, here are some study strategies for you:"
|
1621 |
-
]
|
1622 |
-
|
1623 |
-
if study_plan.get('study_strategies'):
|
1624 |
-
response.extend([f"- {strategy}" for strategy in study_plan['study_strategies']])
|
1625 |
-
else:
|
1626 |
-
if learning_style.lower() == 'visual':
|
1627 |
-
response.extend([
|
1628 |
-
"- Use color coding in your notes",
|
1629 |
-
"- Create mind maps and diagrams",
|
1630 |
-
"- Watch educational videos to visualize concepts"
|
1631 |
-
])
|
1632 |
-
elif learning_style.lower() == 'auditory':
|
1633 |
-
response.extend([
|
1634 |
-
"- Record lectures and listen to them",
|
1635 |
-
"- Explain concepts out loud to yourself",
|
1636 |
-
"- Participate in study groups"
|
1637 |
-
])
|
1638 |
-
elif learning_style.lower() == 'reading/writing':
|
1639 |
-
response.extend([
|
1640 |
-
"- Write detailed summaries in your own words",
|
1641 |
-
"- Create question-answer sets for each topic",
|
1642 |
-
"- Rewrite your notes to reinforce learning"
|
1643 |
-
])
|
1644 |
-
elif learning_style.lower() == 'kinesthetic':
|
1645 |
-
response.extend([
|
1646 |
-
"- Use hands-on activities when possible",
|
1647 |
-
"- Study while walking or pacing",
|
1648 |
-
"- Create physical models to represent concepts"
|
1649 |
-
])
|
1650 |
|
1651 |
-
|
1652 |
-
response.append("\n**Time Management Tips:**")
|
1653 |
-
response.extend([f"- {tip}" for tip in study_plan['time_management_tips']])
|
1654 |
|
1655 |
-
|
1656 |
-
|
1657 |
-
def _generate_courses_response(self, profile: Dict) -> str:
|
1658 |
-
transcript = profile.get('transcript', {})
|
1659 |
-
if not transcript.get('course_history'):
|
1660 |
-
return "I couldn't find your course information. Please upload your transcript first."
|
1661 |
|
1662 |
-
|
1663 |
-
|
1664 |
-
if (course.get('status', '').lower() == 'in progress' or
|
1665 |
-
(isinstance(course.get('credit_earned'), float) and course['credit_earned'] == 0))
|
1666 |
-
]
|
1667 |
|
1668 |
-
|
1669 |
-
|
1670 |
-
|
1671 |
-
|
1672 |
-
|
1673 |
-
|
1674 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1675 |
|
1676 |
-
if
|
1677 |
-
|
1678 |
-
|
1679 |
-
|
1680 |
-
|
1681 |
-
f"
|
1682 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1683 |
)
|
1684 |
-
else:
|
1685 |
-
response.append("I couldn't find any current courses in your transcript.")
|
1686 |
-
|
1687 |
-
if completed_courses:
|
1688 |
-
response.append("\n**Recently Completed Courses:**")
|
1689 |
-
for course in completed_courses[:5]:
|
1690 |
-
course_name = course.get('description') or course.get('course_title', 'Unknown')
|
1691 |
-
grade = course.get('grade_earned', '') or course.get('grade', '')
|
1692 |
-
if grade:
|
1693 |
-
response.append(
|
1694 |
-
f"- {course_name} "
|
1695 |
-
f"(Grade: {grade})"
|
1696 |
-
)
|
1697 |
-
else:
|
1698 |
-
response.append(f"- {course_name}")
|
1699 |
-
|
1700 |
-
rigor = academic_analyzer.analyze_course_rigor(transcript)
|
1701 |
-
if rigor['rating']:
|
1702 |
-
response.append(f"\n**Course Rigor Analysis:** {rigor['rating']}")
|
1703 |
-
if rigor['recommendations']:
|
1704 |
-
response.append("\n**Recommendations:**")
|
1705 |
-
response.extend([f"- {rec}" for rec in rigor['recommendations']])
|
1706 |
-
|
1707 |
-
return "\n".join(response)
|
1708 |
-
|
1709 |
-
def _generate_college_response(self, profile: Dict) -> str:
|
1710 |
-
recommendations = academic_analyzer.generate_college_recommendations(profile.get('transcript', {}))
|
1711 |
-
|
1712 |
-
response = ["**College Recommendations Based on Your Profile:**"]
|
1713 |
-
|
1714 |
-
if recommendations['reach']:
|
1715 |
-
response.append("\n**Reach Schools (Competitive):**")
|
1716 |
-
response.extend([f"- {school}" for school in recommendations['reach'][:3]])
|
1717 |
-
|
1718 |
-
if recommendations['target']:
|
1719 |
-
response.append("\n**Target Schools (Good Match):**")
|
1720 |
-
response.extend([f"- {school}" for school in recommendations['target'][:3]])
|
1721 |
-
|
1722 |
-
if recommendations['safety']:
|
1723 |
-
response.append("\n**Safety Schools (Likely Admission):**")
|
1724 |
-
response.extend([f"- {school}" for school in recommendations['safety'][:3]])
|
1725 |
-
|
1726 |
-
if recommendations['scholarships']:
|
1727 |
-
response.append("\n**Scholarship Opportunities:**")
|
1728 |
-
response.extend([f"- {scholarship}" for scholarship in recommendations['scholarships'][:3]])
|
1729 |
-
|
1730 |
-
if recommendations['improvement_areas']:
|
1731 |
-
response.append("\n**Areas to Improve for College Admissions:**")
|
1732 |
-
response.extend([f"- {area}" for area in recommendations['improvement_areas']])
|
1733 |
-
|
1734 |
-
return "\n".join(response)
|
1735 |
-
|
1736 |
-
def _generate_planning_response(self, profile: Dict) -> str:
|
1737 |
-
study_plan = profile.get('study_plan', {})
|
1738 |
-
|
1739 |
-
response = ["**Study Planning Advice:**"]
|
1740 |
-
|
1741 |
-
if study_plan.get('weekly_schedule'):
|
1742 |
-
response.append("\nHere's a suggested weekly study schedule:")
|
1743 |
-
for day, activities in study_plan['weekly_schedule'].items():
|
1744 |
-
if activities:
|
1745 |
-
response.append(f"\n**{day}:**")
|
1746 |
-
for activity in activities[:2]:
|
1747 |
-
response.append(
|
1748 |
-
f"- {activity.get('course', 'Course')}: "
|
1749 |
-
f"{activity.get('duration', '45-60 minutes')}"
|
1750 |
-
)
|
1751 |
-
else:
|
1752 |
-
response.append("\nA good study schedule should include:")
|
1753 |
-
response.append("- 45-60 minute study blocks with short breaks")
|
1754 |
-
response.append("- Focus on 1-2 subjects per day")
|
1755 |
-
response.append("- Regular review sessions")
|
1756 |
-
|
1757 |
-
if study_plan.get('time_management_tips'):
|
1758 |
-
response.append("\n**Time Management Tips:**")
|
1759 |
-
response.extend([f"- {tip}" for tip in study_plan['time_management_tips'][:3]])
|
1760 |
-
|
1761 |
-
return "\n".join(response)
|
1762 |
-
|
1763 |
-
def _generate_resources_response(self, profile: Dict) -> str:
|
1764 |
-
study_plan = profile.get('study_plan', {})
|
1765 |
-
transcript = profile.get('transcript', {})
|
1766 |
-
|
1767 |
-
response = ["**Recommended Learning Resources:**"]
|
1768 |
|
1769 |
-
|
1770 |
-
|
1771 |
-
|
1772 |
-
|
1773 |
-
|
1774 |
-
|
1775 |
-
|
1776 |
-
|
|
|
1777 |
|
1778 |
-
|
1779 |
-
|
1780 |
-
|
1781 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1782 |
]
|
1783 |
|
1784 |
-
|
1785 |
-
|
1786 |
-
|
1787 |
-
course_name = course.get('description') or course.get('course_title', 'your course')
|
1788 |
-
if 'MATH' in course_name.upper():
|
1789 |
-
response.append(f"- For {course_name}: Desmos Graphing Calculator, Art of Problem Solving")
|
1790 |
-
elif 'SCIENCE' in course_name.upper():
|
1791 |
-
response.append(f"- For {course_name}: PhET Simulations, Crash Course Science videos")
|
1792 |
-
elif 'HISTORY' in course_name.upper():
|
1793 |
-
response.append(f"- For {course_name}: Crash Course History videos, Library of Congress resources")
|
1794 |
|
1795 |
-
return
|
1796 |
-
|
1797 |
-
|
1798 |
-
|
1799 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1800 |
|
1801 |
-
|
1802 |
-
|
1803 |
-
|
1804 |
-
inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
|
1805 |
-
|
1806 |
-
outputs = self.model.generate(
|
1807 |
-
**inputs,
|
1808 |
-
max_new_tokens=200,
|
1809 |
-
temperature=0.7,
|
1810 |
-
top_p=0.9,
|
1811 |
-
repetition_penalty=1.1,
|
1812 |
-
do_sample=True
|
1813 |
-
)
|
1814 |
-
|
1815 |
-
response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
1816 |
-
|
1817 |
-
response = response[len(prompt):].strip()
|
1818 |
-
|
1819 |
-
if response and response[-1] not in {'.', '!', '?'}:
|
1820 |
-
last_period = response.rfind('.')
|
1821 |
-
if last_period > 0:
|
1822 |
-
response = response[:last_period + 1]
|
1823 |
-
|
1824 |
-
return response if response else "I'm not sure how to respond to that. Could you rephrase your question?"
|
1825 |
-
except Exception as e:
|
1826 |
-
logger.error(f"Model generation error: {str(e)}")
|
1827 |
-
return "I encountered an error generating a response. Please try again."
|
1828 |
-
|
1829 |
-
def _update_context(self, message: str, history: List[List[Union[str, None]]]) -> None:
|
1830 |
-
self.context_history.append({"role": "user", "content": message})
|
1831 |
|
1832 |
-
|
1833 |
-
|
1834 |
-
|
1835 |
-
|
1836 |
-
|
1837 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1838 |
|
1839 |
-
|
1840 |
|
1841 |
-
# Initialize
|
1842 |
-
|
1843 |
|
1844 |
class StudyCalendar:
|
1845 |
def __init__(self):
|
@@ -2574,20 +2389,32 @@ def create_enhanced_interface():
|
|
2574 |
]
|
2575 |
)
|
2576 |
|
2577 |
-
with gr.Tab("AI Assistant", id=4):
|
2578 |
-
gr.Markdown("## 💬 Your Personalized
|
2579 |
-
gr.Markdown("Ask
|
2580 |
|
2581 |
chatbot = gr.Chatbot(height=500)
|
2582 |
-
msg = gr.Textbox(label="Your
|
2583 |
-
clear = gr.Button("Clear")
|
2584 |
|
2585 |
-
def respond(message, chat_history):
|
2586 |
-
|
2587 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2588 |
return "", chat_history
|
2589 |
|
2590 |
-
msg.submit(
|
|
|
|
|
|
|
|
|
2591 |
clear.click(lambda: None, None, chatbot, queue=False)
|
2592 |
|
2593 |
with gr.Tab("Goals & Planning", id=5):
|
|
|
16 |
import string
|
17 |
from huggingface_hub import HfApi, HfFolder
|
18 |
import torch
|
19 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
20 |
import time
|
21 |
import logging
|
22 |
import asyncio
|
|
|
35 |
import numpy as np
|
36 |
import matplotlib.pyplot as plt
|
37 |
from tqdm import tqdm
|
38 |
+
import random
|
39 |
|
40 |
# Enhanced Configuration
|
41 |
PROFILES_DIR = "student_profiles"
|
|
|
62 |
logger = logging.getLogger(__name__)
|
63 |
|
64 |
# Model configuration
|
65 |
+
MODEL_NAME = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
|
66 |
|
67 |
@lru_cache(maxsize=1)
|
68 |
def get_model_and_tokenizer():
|
|
|
1473 |
# Initialize profile manager
|
1474 |
profile_manager = EnhancedProfileManager()
|
1475 |
|
1476 |
+
class EducationalChatbot:
    """Socratic-style teaching assistant backed by a causal language model.

    Answers only education-related questions, personalizes the prompt with
    the student's profile, and pairs each answer with multimedia resource
    suggestions matched to the student's learning style.
    """

    def __init__(self):
        self.model_name = "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
        self.tokenizer = None
        self.model = None
        # Keyword map used to decide whether a question is in scope.
        self.educational_topics = {
            'math': ['algebra', 'calculus', 'geometry', 'trigonometry'],
            'science': ['biology', 'chemistry', 'physics', 'astronomy'],
            'humanities': ['history', 'literature', 'philosophy'],
            'languages': ['english', 'spanish', 'french', 'grammar'],
            'arts': ['music', 'art', 'drama'],
            'technology': ['programming', 'computer science']
        }
        self.load_model()

    def load_model(self):
        """Load the HuggingFace model and tokenizer onto the available device."""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype=torch.float16,  # halves memory; adequate for inference
            device_map="auto"
        )
        logger.info("Educational chatbot model loaded")

    def is_educational(self, question: str) -> bool:
        """Return True if the question mentions any known educational topic keyword."""
        question_lower = question.lower()
        return any(
            topic in question_lower
            for topics in self.educational_topics.values()
            for topic in topics
        )

    def generate_response(self, question: str, profile: Dict) -> Tuple[str, List[Dict]]:
        """Generate a personalized educational response.

        Returns a ``(response_text, multimedia_suggestions)`` tuple.
        Non-educational questions get a canned refusal and an empty list;
        generation failures get an error message and an empty list.
        """
        if not self.is_educational(question):
            return (
                "I specialize in educational topics only. Please ask about subjects like math, "
                "science, history, or literature. I can help with concepts, problem-solving methods, "
                "and learning strategies.",
                []
            )

        # Get learning style from profile
        learning_style = self._get_learning_style(profile)

        # Generate base response using the model
        prompt = self._build_prompt(question, profile)
        try:
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

            outputs = self.model.generate(
                **inputs,
                max_new_tokens=300,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.2,
                do_sample=True
            )

            raw_response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            # BUG FIX: for causal LMs, decode(outputs[0]) yields prompt +
            # completion; strip the echoed prompt so the student only sees
            # the newly generated text (the previous implementation did this
            # via response[len(prompt):] and it was lost in the rewrite).
            if raw_response.startswith(prompt):
                raw_response = raw_response[len(prompt):]
            raw_response = raw_response.strip()
        except Exception as e:
            # Keep the chat usable even when generation fails (OOM, device issues).
            logger.error(f"Model generation error: {str(e)}")
            return "I encountered an error generating a response. Please try again.", []

        # Process the response to be more pedagogical
        processed_response = self._make_response_pedagogical(raw_response)

        # Add multimedia based on learning style
        multimedia = self._get_multimedia_suggestions(processed_response, learning_style)

        return processed_response, multimedia

    def _get_learning_style(self, profile: Dict) -> str:
        """Extract the learning style from the profile; 'balanced' when unknown."""
        if not profile or 'learning_style' not in profile:
            return 'balanced'

        style_match = re.search(r"Your primary learning style is\s*\*\*(.*?)\*\*",
                                profile['learning_style'])
        return style_match.group(1).lower() if style_match else 'balanced'

    def _build_prompt(self, question: str, profile: Dict) -> str:
        """Build a personalized Socratic prompt for the model."""
        base_prompt = (
            "You are an expert teaching assistant helping a student. Your role is to guide them "
            "to discover answers themselves, not provide direct solutions. Use the Socratic method "
            "by asking guiding questions and explaining concepts step-by-step.\n\n"
        )

        if profile:
            # Add academic context if available
            if 'transcript' in profile:
                # BUG FIX: course records elsewhere in this app carry either
                # 'description' or 'course_title'; indexing c['course_title']
                # directly raised KeyError for progress-summary transcripts.
                courses = [
                    c.get('course_title') or c.get('description', 'Unknown')
                    for c in profile['transcript'].get('course_history', [])
                ]
                if courses:
                    base_prompt += (
                        f"The student has taken these courses: {', '.join(courses[:5])}. "
                        "Consider their academic background when responding.\n\n"
                    )

            # Add learning style
            learning_style = self._get_learning_style(profile)
            if learning_style != 'balanced':
                base_prompt += (
                    f"The student is a {learning_style} learner. Adapt your teaching approach accordingly.\n\n"
                )

        base_prompt += (
            f"Student Question: {question}\n\n"
            "Teaching Assistant Response:\n"
            "1. First, let's understand the key concepts involved...\n"
            "2. What do you think would be the first step in solving this?\n"
            "3. Consider this approach...\n"
            "4. Here's how we might break this down...\n"
            "Remember, the goal is understanding, not just the answer."
        )

        return base_prompt

    def _make_response_pedagogical(self, response: str) -> str:
        """Process the raw response to be more teaching-oriented."""
        # Remove direct answers if present
        response = re.sub(r"(the answer is|it is|direct solution:) .*?(\n|$)", "", response, flags=re.I)

        # Add more guiding language
        guiding_phrases = [
            "What do you think about...",
            "Have you considered...",
            "Let's break this down...",
            "One approach might be...",
            "Think about how you would...",
            "What steps would you take to..."
        ]

        # Ensure response has at least 2 guiding questions
        if sum(1 for phrase in guiding_phrases if phrase.lower() in response.lower()) < 2:
            response += "\n\n" + "\n".join(guiding_phrases[:2])

        return response

    def _get_multimedia_suggestions(self, response: str, learning_style: str) -> List[Dict]:
        """Generate multimedia suggestions based on learning style and content.

        Each suggestion is a dict with 'type', 'source', and 'url' keys.
        """
        suggestions = []

        # Common educational platforms, keyed by learning style.
        resources = {
            'visual': [
                {"type": "video", "source": "Khan Academy", "url": "https://www.khanacademy.org"},
                {"type": "diagram", "source": "Math is Fun", "url": "https://www.mathsisfun.com"},
                {"type": "infographic", "source": "InfoGram", "url": "https://infogram.com"}
            ],
            'auditory': [
                {"type": "podcast", "source": "Stuff You Should Know", "url": "https://www.iheart.com/podcast/stuff-you-should-know-26940277"},
                {"type": "audio_lecture", "source": "The Great Courses", "url": "https://www.thegreatcourses.com"}
            ],
            'reading/writing': [
                {"type": "article", "source": "Britannica", "url": "https://www.britannica.com"},
                {"type": "textbook", "source": "OpenStax", "url": "https://openstax.org"}
            ],
            'kinesthetic': [
                {"type": "interactive", "source": "PhET Simulations", "url": "https://phet.colorado.edu"},
                {"type": "hands-on", "source": "Science Buddies", "url": "https://www.sciencebuddies.org"}
            ]
        }

        # Add general suggestions based on learning style
        if learning_style in resources:
            suggestions.extend(resources[learning_style][:2])

        # Add specific content based on response
        if "math" in response.lower():
            suggestions.append({
                "type": "practice_problems",
                "source": "Art of Problem Solving",
                "url": "https://artofproblemsolving.com"
            })
        elif "science" in response.lower():
            suggestions.append({
                "type": "experiment",
                "source": "Science Journal",
                "url": "https://sciencejournal.withgoogle.com"
            })

        return suggestions
|
1655 |
|
1656 |
+
# Initialize the chatbot
# NOTE(review): this constructs EducationalChatbot at import time, which
# loads a 12B-parameter model immediately — presumably the source of the
# Space's startup "Runtime error" on constrained hardware; consider lazy
# initialization (e.g. load on first request) — confirm with deployment.
educational_chatbot = EducationalChatbot()
|
1658 |
|
1659 |
class StudyCalendar:
|
1660 |
def __init__(self):
|
|
|
2389 |
]
|
2390 |
)
|
2391 |
|
2392 |
+
with gr.Tab("AI Teaching Assistant", id=4):
|
2393 |
+
gr.Markdown("## 💬 Your Personalized Teaching Assistant")
|
2394 |
+
gr.Markdown("Ask educational questions about any subject. I'll guide you to discover the answers yourself.")
|
2395 |
|
2396 |
chatbot = gr.Chatbot(height=500)
|
2397 |
+
msg = gr.Textbox(label="Your Educational Question")
|
2398 |
+
clear = gr.Button("Clear Chat")
|
2399 |
|
2400 |
+
def respond(message: str, chat_history: List, profile: Dict) -> Tuple[str, List]:
    """Append the assistant's answer (plus resource links) to the chat history.

    Returns ``("", updated_history)`` so the input textbox is cleared on submit.
    """
    response, multimedia = educational_chatbot.generate_response(message, profile)

    if multimedia:
        # One markdown bullet per suggested resource.
        bullets = [
            f"- [{item['type'].title()}] {item['source']}: {item['url']}\n"
            for item in multimedia
        ]
        response += "\n\n**Suggested Resources:**\n" + "".join(bullets)

    chat_history.append((message, response))
    return "", chat_history
|
2412 |
|
2413 |
+
msg.submit(
    respond,
    # NOTE(review): the gr.State(...) argument is evaluated once, when the
    # UI is built — load_profile runs at startup and every subsequent submit
    # reuses that stale snapshot. Presumably the profile should be loaded
    # per request; confirm, and if so pass the session token as state and
    # load the profile inside respond instead.
    inputs=[msg, chatbot, gr.State(profile_manager.load_profile(session_token.value))],
    outputs=[msg, chatbot]
)
|
2418 |
clear.click(lambda: None, None, chatbot, queue=False)
|
2419 |
|
2420 |
with gr.Tab("Goals & Planning", id=5):
|