lodhrangpt committed · Commit 2b0dd62 (verified) · Parent: 36def4c

Update app.py

Files changed (1):
  1. app.py (+80 -26)
app.py CHANGED
@@ -45,34 +45,88 @@ def transcribe(audio_path):
         return create_error_pdf(f"API Error: {error_msg}")
 
 def generate_notes(transcript):
-    try:
-        sentences = sent_tokenize(transcript)
-    except LookupError:
-        sentences = custom_sent_tokenize(transcript)
-
-    # Generate long questions
-    long_questions = [f"Explain the concept discussed in: '{sentence}'." for sentence in sentences[:5]]
-
-    # Generate short questions
-    short_questions = [f"What does '{sentence.split()[0]}' mean in the context of this text?" for sentence in sentences[:5]]
-
-    # Generate MCQs with relevant distractors
-    mcqs = []
-    for sentence in sentences[:5]:
-        if len(sentence.split()) > 1:  # Ensure there are enough words to create meaningful options
-            key_word = sentence.split()[0]  # Use the first word as a key term
-            distractors = ["Term A", "Term B", "Term C"]  # Replace with relevant terms if needed
-            options = [key_word] + distractors
-            random.shuffle(options)  # Shuffle options for randomness
-            mcq = {
-                "question": f"What is '{key_word}' based on the context?",
-                "options": options,
-                "answer": key_word
-            }
-            mcqs.append(mcq)
+    # try:
+    #     sentences = sent_tokenize(transcript)
+    # except LookupError:
+    #     sentences = custom_sent_tokenize(transcript)
+
+    # # Generate long questions
+    # long_questions = [f"Explain the concept discussed in: '{sentence}'." for sentence in sentences[:5]]
+
+    # # Generate short questions
+    # short_questions = [f"What does '{sentence.split()[0]}' mean in the context of this text?" for sentence in sentences[:5]]
+
+    # # Generate MCQs with relevant distractors
+    # mcqs = []
+    # for sentence in sentences[:5]:
+    #     if len(sentence.split()) > 1:  # Ensure there are enough words to create meaningful options
+    #         key_word = sentence.split()[0]  # Use the first word as a key term
+    #         distractors = ["Term A", "Term B", "Term C"]  # Replace with relevant terms if needed
+    #         options = [key_word] + distractors
+    #         random.shuffle(options)  # Shuffle options for randomness
+    #         mcq = {
+    #             "question": f"What is '{key_word}' based on the context?",
+    #             "options": options,
+    #             "answer": key_word
+    #         }
+    #         mcqs.append(mcq)
+    client = Groq(api_key="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")
+
+    chat_completion = client.chat.completions.create(
+        #
+        # Required parameters
+        #
+        messages=[
+            # Set an optional system message. This sets the behavior of the
+            # assistant and can be used to provide specific instructions for
+            # how it should behave throughout the conversation.
+            {
+                "role": "system",
+                "content": "you are expert question generator from content. Generate one long question,possible number of short questions and mcqs"
+            },
+            # Set a user message for the assistant to respond to.
+            {
+                "role": "user",
+                "content": transcript,
+            }
+        ],
+
+        # The language model which will generate the completion.
+        model="llama3-8b-8192",
+
+        #
+        # Optional parameters
+        #
+
+        # Controls randomness: lowering results in less random completions.
+        # As the temperature approaches zero, the model will become deterministic
+        # and repetitive.
+        temperature=0.5,
+
+        # The maximum number of tokens to generate. Requests can use up to
+        # 32,768 tokens shared between prompt and completion.
+        max_tokens=1024,
+
+        # Controls diversity via nucleus sampling: 0.5 means half of all
+        # likelihood-weighted options are considered.
+        top_p=1,
+
+        # A stop sequence is a predefined or user-specified text string that
+        # signals an AI to stop generating content, ensuring its responses
+        # remain focused and concise. Examples include punctuation marks and
+        # markers like "[end]".
+        stop=None,
+
+        # If set, partial message deltas will be sent.
+        stream=False,
+    )
+
+    # Print the completion returned by the LLM.
+    res=chat_completion.choices[0].message.content
+
 
     # Generate and save a structured PDF
-    pdf_path = create_pdf(transcript, long_questions, short_questions, mcqs)
+    pdf_path = create_pdf(res)
     return pdf_path
 
 def create_pdf(transcript, long_questions, short_questions, mcqs):
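
For context on the logic this commit comments out: NLTK's sent_tokenize raises LookupError unless the punkt tokenizer data has been downloaded at least once, which is what the removed try/except and the custom_sent_tokenize fallback were handling. A minimal sketch of the usual one-time setup (not part of this commit):

import nltk

# One-time download of the sentence tokenizer data; without it,
# sent_tokenize raises LookupError, the case the removed fallback caught.
nltk.download("punkt")

from nltk.tokenize import sent_tokenize

print(sent_tokenize("First sentence. Second sentence."))
# ['First sentence.', 'Second sentence.']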
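
Two details of the new implementation deserve a closer look. First, the Groq API key is hardcoded and is now part of the public commit history, so it should be treated as leaked and rotated; reading it from an environment variable avoids committing secrets. Second, the new code calls create_pdf(res) with one argument, while the create_pdf definition left untouched by this commit still takes four parameters, so the call would raise a TypeError at runtime until that function is reworked. A minimal sketch of both changes, assuming a GROQ_API_KEY environment variable and a hypothetical single-argument create_pdf (neither is in this commit):

import os

from groq import Groq

def generate_notes(transcript):
    # Assumption: the key is supplied via the environment, not hardcoded.
    client = Groq(api_key=os.environ["GROQ_API_KEY"])

    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": (
                    "You are an expert question generator. From the given "
                    "content, generate one long question, as many short "
                    "questions as possible, and MCQs."
                ),
            },
            {"role": "user", "content": transcript},
        ],
        model="llama3-8b-8192",
        temperature=0.5,
        max_tokens=1024,
    )
    res = chat_completion.choices[0].message.content

    # Hypothetical rework: create_pdf accepts the generated notes as a
    # single text blob, e.g. def create_pdf(notes_text): ...
    return create_pdf(res)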