import gradio as gr
import pytube
from youtube_transcript_api import YouTubeTranscriptApi as yt
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import os
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_together import Together
import re

# Together API key (in practice, load this from the environment rather than hardcoding it in source)
os.environ['TOGETHER_API_KEY'] = "d88cb7414e4039a84d2ed63f1b47daaaa4230c4c53a422045d8a30a9a3bc87d8"

def Summary_BART(text):
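    """Summarize a transcript with sshleifer/distilbart-cnn-12-6 (a distilled BART fine-tuned on CNN/DailyMail)."""
    # Note: the tokenizer and model are reloaded on every call; a long-running app could load them once at module level.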
    checkpoint = "sshleifer/distilbart-cnn-12-6"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
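    # BART's encoder accepts at most 1024 tokens, so longer transcripts are truncated here.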
    inputs = tokenizer(text, max_length=1024, truncation=True, return_tensors="pt")
    summary_ids = model.generate(inputs["input_ids"])
    summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    return summary[0]

def YtToQuizz(link, difficulty_level):
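    """Fetch the video's transcript, summarize it, and have an LLM draft ten multiple-choice questions."""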
    video_id = pytube.extract.video_id(link)
    transcript = yt.get_transcript(video_id)
    data = ""
    for text in transcript:
        data += text.get('text') + " "
    summary = Summary_BART(data)
    
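    # Ask for a fixed, line-oriented format so the questions can be parsed with a regex below.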
    mcq_template = """
    Generate 10 different multiple-choice questions (MCQs) related to the following summary: {summary}
    The difficulty level of the questions should be: {difficulty_level}
    Please provide the following for each question:
    1. Question
    2. Correct answer
    3. Three plausible incorrect answer options
    4. Format: "Question: <question text>\\nCorrect answer: <correct answer>\\nIncorrect answers: <option1>, <option2>, <option3>"
    """ 
    prompt = PromptTemplate(
        input_variables=['summary', 'difficulty_level'],
        template=mcq_template
    )
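    # Llama-3-70B chat served via Together drafts the questions from the summary.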
    llama3 = Together(model="meta-llama/Llama-3-70b-chat-hf", max_tokens=2500)
    mcq_chain = LLMChain(llm=llama3, prompt=prompt)

    response = mcq_chain.invoke({
        "summary": summary,
        "difficulty_level": difficulty_level
    })

    response_text = response['text']

    # Extract each MCQ block using the fixed format requested in the prompt.
    mcq_pattern = r'Question: (.*?)\nCorrect answer: (.*?)\nIncorrect answers: (.*?)(?:\n|$)'
    mcqs = re.findall(mcq_pattern, response_text, re.DOTALL)

    if len(mcqs) < 10:
        return "Failed to generate 10 complete MCQs. Please try again."

    formatted_mcqs = []
    for idx, (question, correct_answer, incorrect_answers) in enumerate(mcqs[:10]):
        # Guard against responses that contain fewer than three incorrect options.
        options = ([opt.strip() for opt in incorrect_answers.split(',')] + ["N/A"] * 3)[:3]
        formatted_mcqs.append(
            f"Q{idx+1}: {question.strip()}\nA) {correct_answer.strip()}\n"
            f"B) {options[0]}\nC) {options[1]}\nD) {options[2]}\n"
        )

    return "\n\n".join(formatted_mcqs)

def main(link, difficulty_level):
    return YtToQuizz(link, difficulty_level)

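# Gradio UI: a textbox for the video link, a difficulty dropdown, and a textbox for the generated quiz.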
iface = gr.Interface(
    fn=main,
    inputs=[
        gr.components.Textbox(lines=2, placeholder="Enter YouTube video link"),
        gr.components.Dropdown(["Easy", "Medium", "Hard"], label="Select difficulty level:")
    ],
    outputs=[
        gr.components.Textbox(label="MCQs Output", lines=20)
    ],
    title="YouTube Video Subtitle to MCQs Quiz",
    description="Generate MCQs from YouTube video subtitles"
)

if __name__ == '__main__':
    iface.launch()