Update student_functions.py
Browse files- student_functions.py +100 -108
student_functions.py
CHANGED
@@ -11,8 +11,11 @@ from youtube_transcript_api.formatters import JSONFormatter
|
|
11 |
from urllib.parse import urlparse, parse_qs
|
12 |
from pypdf import PdfReader
|
13 |
from ai71 import AI71
|
|
|
14 |
import os
|
15 |
-
|
|
|
|
|
16 |
AI71_API_KEY = "api71-api-652e5c6c-8edf-41d0-9c34-28522b07bef9"
|
17 |
|
18 |
|
@@ -28,26 +31,24 @@ def extract_text_from_pdf_s(pdf_path):
|
|
28 |
|
29 |
def generate_response_from_pdf(query, pdf_text):
|
30 |
response = ''
|
31 |
-
|
32 |
-
model=
|
33 |
messages=[
|
34 |
{"role": "system", "content": "You are a pdf questioning assistant."},
|
35 |
{"role": "user",
|
36 |
"content": f'''Answer the querry based on the given content.Content:{pdf_text},query:{query}'''},
|
37 |
-
]
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
return response.replace("###", '')
|
43 |
|
44 |
|
45 |
def generate_quiz(subject, topic, count, difficult):
|
46 |
quiz_output = ""
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
messages=[
|
51 |
{"role": "system", "content": "You are a teaching assistant."},
|
52 |
{"role": "user",
|
53 |
"content": f'''Generate {count} multiple-choice questions in the subject of {subject} for the topic {topic} for students at a {difficult} level. Ensure the questions are well-diversified and cover various aspects of the topic. Format the questions as follows:
|
@@ -57,93 +58,86 @@ Question: [Question text] [specific concept in a question]
|
|
57 |
<<o>> [Option3]
|
58 |
<<o>> [Option4],
|
59 |
Answer: [Option number]'''},
|
60 |
-
]
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
print("Quiz generated")
|
66 |
-
return quiz_output
|
67 |
|
68 |
|
69 |
|
70 |
def generate_ai_response(query):
|
71 |
ai_response = ''
|
72 |
-
|
73 |
-
model=
|
74 |
-
messages=[
|
75 |
{"role": "system", "content": "You are a teaching assistant."},
|
76 |
{"role": "user", "content": f'Assist the user clearly for his questions: {query}.'},
|
77 |
-
]
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
return ai_response.replace('###', '')
|
83 |
|
84 |
|
85 |
def generate_project_idea(subject, topic, overview):
|
86 |
string = ''
|
87 |
-
|
88 |
-
model=
|
89 |
-
messages=[
|
90 |
{"role": "system", "content": "You are a project building assistant."},
|
91 |
{"role": "user",
|
92 |
"content": f'''Give the different project ideas to build project in {subject} specifically in {topic} for school students. {overview}.'''},
|
93 |
-
]
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
string += chunk.choices[0].delta.content
|
98 |
-
return string
|
99 |
|
|
|
100 |
|
101 |
def generate_project_idea_questions(project_idea, query):
|
102 |
project_idea_answer = ''
|
103 |
-
|
104 |
-
model=
|
105 |
messages=[
|
106 |
{"role": "system", "content": "You are a project building assistant."},
|
107 |
{"role": "user",
|
108 |
"content": f'''Assist me clearly for the following question for the given idea. Idea: {project_idea}. Question: {query}'''},
|
109 |
-
]
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
return project_idea_answer
|
115 |
|
116 |
|
117 |
def generate_step_by_step_explanation(query):
|
118 |
explanation = ''
|
119 |
-
|
120 |
-
model=
|
121 |
messages=[
|
122 |
{"role": "system", "content": "You are the best teaching assistant."},
|
123 |
{"role": "user",
|
124 |
"content": f'''Provide me the clear step by step explanation answer for the following question. Question: {query}'''},
|
125 |
-
]
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
return explanation.replace('###', '')
|
131 |
|
132 |
|
133 |
def study_plan(subjects, hours, arealag, goal):
|
134 |
plan = ''
|
135 |
-
|
136 |
-
model=
|
137 |
messages=[
|
138 |
{"role": "system", "content": "You are the best teaching assistant."},
|
139 |
{"role": "user",
|
140 |
"content": f'''Provide me the clear personalised study plan for the subjects {subjects} i lag in areas like {arealag}, im available for {hours} hours per day and my study goal is to {goal}.Provide me like a timetable like day1,day2 for 5 days with concepts,also suggest some books'''},
|
141 |
-
]
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
return plan.replace('\n', '<br>')
|
147 |
|
148 |
|
149 |
class ConversationBufferMemory:
|
@@ -170,18 +164,19 @@ def spk_msg(user_input, memory):
|
|
170 |
"content": f"Previous conversation:\n{chat_history}\n\nNew human question: {user_input}\nResponse:"}
|
171 |
]
|
172 |
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
|
|
185 |
|
186 |
|
187 |
def get_first_youtube_video_link(query):
|
@@ -204,17 +199,16 @@ def get_first_youtube_video_link(query):
|
|
204 |
|
205 |
def content_translate(text):
|
206 |
translated_content = ''
|
207 |
-
|
208 |
-
model=
|
209 |
-
messages=[
|
210 |
{"role": "system", "content": "You are the best teaching assistant."},
|
211 |
{"role": "user", "content": f'''Translate the text to hindi. Text: {text}'''},
|
212 |
-
]
|
213 |
-
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
return translated_content
|
218 |
|
219 |
|
220 |
def get_video_id(url):
|
@@ -272,18 +266,16 @@ def get_simplified_explanation(text):
|
|
272 |
)
|
273 |
|
274 |
response = ""
|
275 |
-
|
276 |
-
model=
|
277 |
messages=[
|
278 |
{"role": "system", "content": "You are a helpful assistant."},
|
279 |
{"role": "user", "content": prompt},
|
280 |
-
]
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
return response
|
287 |
|
288 |
|
289 |
def summarise_text(url):
|
@@ -316,17 +308,18 @@ def generate_speech_from_pdf(content):
|
|
316 |
print(f"Directory {directory} does not exist.")
|
317 |
|
318 |
speech = ''
|
319 |
-
|
320 |
-
model=
|
321 |
messages=[
|
322 |
{"role": "system", "content": "You are a summarising assistant."},
|
323 |
{"role": "user",
|
324 |
"content": f'''Summarise the given content for each chapter for 1 sentence.Content={content}'''},
|
325 |
-
]
|
326 |
-
|
327 |
-
|
328 |
-
|
329 |
-
|
|
|
330 |
speech = speech[:-6].replace("###", '')
|
331 |
chapters = speech.split('\n\n')
|
332 |
pdf_audio(chapters[:4])
|
@@ -343,14 +336,13 @@ def content_translate(text):
|
|
343 |
|
344 |
|
345 |
translated_content = ''
|
346 |
-
|
347 |
-
|
348 |
-
|
349 |
{"role": "system", "content": "You are the best teaching assistant."},
|
350 |
{"role": "user", "content": f'''Translate the text to hindi. Text: {text}'''},
|
351 |
-
]
|
352 |
-
|
353 |
-
|
354 |
-
|
355 |
-
|
356 |
-
return translated_content
|
|
|
11 |
from urllib.parse import urlparse, parse_qs
from pypdf import PdfReader
from ai71 import AI71
from mistralai import Mistral
import os

# SECURITY(review): live API credentials are hard-coded in source control.
# They should be rotated immediately and loaded from the environment
# (e.g. os.environ["MISTRAL_API_KEY"]) instead. Left in place here only
# because removing them changes runtime behavior.
API_KEY = 'xQ2Zhfsp4cLar4lvBRDWZKljvp0Ej427'
MODEL = "mistral-large-latest"

# Shared Mistral client used by every generate_* helper in this module.
client = Mistral(api_key=API_KEY)

# Legacy AI71 key — SECURITY(review): same hard-coded-secret problem as above.
AI71_API_KEY = "api71-api-652e5c6c-8edf-41d0-9c34-28522b07bef9"
|
20 |
|
21 |
|
|
|
31 |
|
32 |
def generate_response_from_pdf(query, pdf_text):
    """Answer *query* grounded in *pdf_text* via the Mistral chat API.

    Parameters:
        query: the user's question about the PDF.
        pdf_text: extracted text of the PDF used as context.

    Returns a JSON response of the form ``{"response": <answer text>}``.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file (outside the visible section) — confirm.
    """
    # Removed dead `response = ''` accumulator left over from the old
    # streaming implementation; it was never used.
    chat_response = client.chat.complete(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are a pdf questioning assistant."},
            {"role": "user",
             "content": f'''Answer the querry based on the given content.Content:{pdf_text},query:{query}'''},
        ],
    )
    # First choice holds the model's answer.
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
44 |
+
|
|
|
45 |
|
46 |
|
47 |
def generate_quiz(subject, topic, count, difficult):
|
48 |
quiz_output = ""
|
49 |
+
chat_response = client.chat.complete(
|
50 |
+
model=MODEL,
|
51 |
+
messages=messages=[
|
|
|
52 |
{"role": "system", "content": "You are a teaching assistant."},
|
53 |
{"role": "user",
|
54 |
"content": f'''Generate {count} multiple-choice questions in the subject of {subject} for the topic {topic} for students at a {difficult} level. Ensure the questions are well-diversified and cover various aspects of the topic. Format the questions as follows:
|
|
|
58 |
<<o>> [Option3]
|
59 |
<<o>> [Option4],
|
60 |
Answer: [Option number]'''},
|
61 |
+
]
|
62 |
+
)
|
63 |
+
response_content = chat_response.choices[0].message.content
|
64 |
+
return jsonify({"response": response_content})
|
65 |
+
|
|
|
|
|
66 |
|
67 |
|
68 |
|
69 |
def generate_ai_response(query):
    """Return a general teaching-assistant answer for *query*.

    Returns a JSON response of the form ``{"response": <answer text>}``.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `ai_response = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        # BUG FIX: the original read `messages=messages=[` — a chained
        # assignment inside a call, which is a SyntaxError. A single
        # keyword argument is intended.
        messages=[
            {"role": "system", "content": "You are a teaching assistant."},
            {"role": "user", "content": f'Assist the user clearly for his questions: {query}.'},
        ],
    )
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
80 |
+
|
|
|
81 |
|
82 |
|
83 |
def generate_project_idea(subject, topic, overview):
    """Generate school-level project ideas for *topic* within *subject*.

    Parameters:
        subject: academic subject (e.g. "Physics").
        topic: specific topic within the subject.
        overview: free-text constraints or extra context appended to the prompt.

    Returns a JSON response of the form ``{"response": <ideas text>}``.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `string = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        # BUG FIX: the original read `messages=messages=[`, a SyntaxError;
        # a single keyword argument is intended.
        messages=[
            {"role": "system", "content": "You are a project building assistant."},
            {"role": "user",
             "content": f'''Give the different project ideas to build project in {subject} specifically in {topic} for school students. {overview}.'''},
        ],
    )
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
|
|
|
|
95 |
|
96 |
+
|
97 |
|
98 |
def generate_project_idea_questions(project_idea, query):
    """Answer a follow-up *query* about a previously generated *project_idea*.

    Returns a JSON response of the form ``{"response": <answer text>}``.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `project_idea_answer = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are a project building assistant."},
            {"role": "user",
             "content": f'''Assist me clearly for the following question for the given idea. Idea: {project_idea}. Question: {query}'''},
        ],
    )
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
110 |
+
|
|
|
111 |
|
112 |
|
113 |
def generate_step_by_step_explanation(query):
    """Produce a step-by-step explanation for the given question.

    Returns a JSON response of the form ``{"response": <explanation text>}``.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `explanation = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are the best teaching assistant."},
            {"role": "user",
             "content": f'''Provide me the clear step by step explanation answer for the following question. Question: {query}'''},
        ],
    )
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
125 |
+
|
|
|
126 |
|
127 |
|
128 |
def study_plan(subjects, hours, arealag, goal):
    """Build a personalised 5-day study plan.

    Parameters:
        subjects: subjects to cover.
        hours: hours available per day.
        arealag: areas where the student is lagging.
        goal: the student's study goal.

    Returns a JSON response ``{"response": <plan>}`` with newlines converted
    to ``<br>`` for direct HTML rendering.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `plan = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        messages=[
            {"role": "system", "content": "You are the best teaching assistant."},
            {"role": "user",
             "content": f'''Provide me the clear personalised study plan for the subjects {subjects} i lag in areas like {arealag}, im available for {hours} hours per day and my study goal is to {goal}.Provide me like a timetable like day1,day2 for 5 days with concepts,also suggest some books'''},
        ],
    )
    response_content = chat_response.choices[0].message.content
    # Newline -> <br> so the plan renders as a timetable in HTML.
    return jsonify({"response": response_content.replace('\n', '<br>')})
|
140 |
+
|
|
|
141 |
|
142 |
|
143 |
class ConversationBufferMemory:
|
|
|
164 |
"content": f"Previous conversation:\n{chat_history}\n\nNew human question: {user_input}\nResponse:"}
|
165 |
]
|
166 |
|
167 |
+
if 1==1:
|
168 |
+
chat_response = client.chat.complete(
|
169 |
+
model=MODEL,
|
170 |
+
messages=[
|
171 |
+
{"role": "system", "content": "You are a pdf questioning assistant."},
|
172 |
+
{"role": "user",
|
173 |
+
"content": f'''Answer the querry based on the given content.Content:{pdf_text},query:{query}'''},
|
174 |
+
]
|
175 |
+
)
|
176 |
+
response_content = chat_response.choices[0].message.content
|
177 |
+
return jsonify({"response": response_content})
|
178 |
+
|
179 |
+
|
180 |
|
181 |
|
182 |
def get_first_youtube_video_link(query):
|
|
|
199 |
|
200 |
def content_translate(text):
    """Translate *text* to Hindi via the Mistral chat API.

    Returns a JSON response of the form ``{"response": <translated text>}``.

    NOTE(review): a second ``content_translate`` is defined later in this
    file; at import time the later definition shadows this one — the
    duplicate should be removed.

    NOTE(review): ``jsonify`` is assumed to be imported from Flask earlier
    in this file — confirm.
    """
    # Removed dead `translated_content = ''` accumulator (never used).
    chat_response = client.chat.complete(
        model=MODEL,
        # BUG FIX: the original read `messages=messages=[`, a SyntaxError;
        # a single keyword argument is intended.
        messages=[
            {"role": "system", "content": "You are the best teaching assistant."},
            {"role": "user", "content": f'''Translate the text to hindi. Text: {text}'''},
        ],
    )
    response_content = chat_response.choices[0].message.content
    return jsonify({"response": response_content})
|
211 |
+
|
|
|
212 |
|
213 |
|
214 |
def get_video_id(url):
|
|
|
266 |
)
|
267 |
|
268 |
response = ""
|
269 |
+
chat_response = client.chat.complete(
|
270 |
+
model=MODEL,
|
271 |
messages=[
|
272 |
{"role": "system", "content": "You are a helpful assistant."},
|
273 |
{"role": "user", "content": prompt},
|
274 |
+
]
|
275 |
+
)
|
276 |
+
response_content = chat_response.choices[0].message.content
|
277 |
+
return jsonify({"response": response_content})
|
278 |
+
|
|
|
|
|
279 |
|
280 |
|
281 |
def summarise_text(url):
|
|
|
308 |
print(f"Directory {directory} does not exist.")
|
309 |
|
310 |
speech = ''
|
311 |
+
chat_response = client.chat.complete(
|
312 |
+
model=MODEL,
|
313 |
messages=[
|
314 |
{"role": "system", "content": "You are a summarising assistant."},
|
315 |
{"role": "user",
|
316 |
"content": f'''Summarise the given content for each chapter for 1 sentence.Content={content}'''},
|
317 |
+
]
|
318 |
+
)
|
319 |
+
response_content = chat_response.choices[0].message.content
|
320 |
+
|
321 |
+
if response_content:
|
322 |
+
speech += response_content
|
323 |
speech = speech[:-6].replace("###", '')
|
324 |
chapters = speech.split('\n\n')
|
325 |
pdf_audio(chapters[:4])
|
|
|
336 |
|
337 |
|
338 |
translated_content = ''
|
339 |
+
chat_response = client.chat.complete(
|
340 |
+
model=MODEL,
|
341 |
+
messages=[
|
342 |
{"role": "system", "content": "You are the best teaching assistant."},
|
343 |
{"role": "user", "content": f'''Translate the text to hindi. Text: {text}'''},
|
344 |
+
]
|
345 |
+
)
|
346 |
+
response_content = chat_response.choices[0].message.content
|
347 |
+
return jsonify({"response": response_content})
|
348 |
+
|
|