palbha committed
Commit 5036278 · verified · 1 Parent(s): e4a2d17

Create app.py

Files changed (1)
  1. app.py +63 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ import gradio as gr
+ import torch
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer
+
+ # Whisper model for transcription
+ WHISPER_MODEL = "openai/whisper-large-v3"
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+ transcriber = pipeline(
+     task="automatic-speech-recognition",
+     model=WHISPER_MODEL,
+     chunk_length_s=30,
+     device=DEVICE,
+ )
+
+ # LLaMA model for generating meeting minutes
+ LLAMA = "meta-llama/Llama-2-7b-chat-hf"  # Change to your preferred model
+ # 4-bit NF4 quantization via bitsandbytes (requires a CUDA GPU)
+ quant_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_compute_dtype=torch.bfloat16,
+     bnb_4bit_quant_type="nf4",
+ )
+
+ # Load model & tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(LLAMA)
+ tokenizer.pad_token = tokenizer.eos_token
+ model = AutoModelForCausalLM.from_pretrained(LLAMA, device_map="auto", quantization_config=quant_config)
+
+ # Transcribe the audio, then generate minutes from the transcript
+ def process_audio(audio_file):
+     if audio_file is None:
+         return "Error: No audio provided!"
+
+     # Transcribe audio
+     transcript = transcriber(audio_file)["text"]
+
+     # Generate meeting minutes
+     system_message = "You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown."
+     user_prompt = f"Below is an extract of the transcript of a Denver council meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}"
+
+     messages = [
+         {"role": "system", "content": system_message},
+         {"role": "user", "content": user_prompt},
+     ]
+
+     inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(DEVICE)
+     # TextStreamer prints tokens to the server console as they are generated;
+     # the Gradio UI receives the full text only after generation finishes.
+     streamer = TextStreamer(tokenizer)
+     outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer)
+
+     # Decode only the newly generated tokens, not the prompt
+     return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
+
+ # Gradio interface
+ interface = gr.Interface(
+     fn=process_audio,
+     inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
+     outputs="text",
+     title="Meeting Minutes Generator",
+     description="Upload or record an audio file to get structured meeting minutes in Markdown.",
+ )
+
+ # Launch app
+ interface.launch()
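
Transcription plus generation can take minutes for long recordings. One possible variant of the final launch call (a sketch, assuming Gradio's default queue settings) enables the request queue so long-running jobs are not cut off by HTTP request timeouts:

# Variant of the last line: queue requests so long transcription/generation
# calls complete instead of timing out at the HTTP layer.
interface.queue().launch()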