import gradio as gr
import torch
from transformers import pipeline, AutoTokenizer, AutoProcessor, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, AutoModelForSpeechSeq2Seq

# Whisper Model Optimization
WHISPER_MODEL = "openai/whisper-large-v3"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# 4-bit NF4 quantization with double quantization shrinks the weights' memory
# footprint while computing in bfloat16; bitsandbytes requires a CUDA GPU,
# so the config is only applied when one is available (see below).
whisper_quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4"
)

whisper_model = AutoModelForSpeechSeq2Seq.from_pretrained(
    WHISPER_MODEL,
    quantization_config=whisper_quant_config if DEVICE == "cuda" else None,  # 4-bit only on GPU
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,  # fp16 on GPU, fp32 on CPU
    device_map="auto"
)

# The ASR pipeline needs the audio feature extractor as well as the text
# tokenizer; AutoProcessor bundles both for Whisper. No device argument is
# passed because device_map="auto" has already placed the model.
whisper_processor = AutoProcessor.from_pretrained(WHISPER_MODEL)
transcriber = pipeline(
    task="automatic-speech-recognition",
    model=whisper_model,
    tokenizer=whisper_processor.tokenizer,
    feature_extractor=whisper_processor.feature_extractor,
    chunk_length_s=30
)
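
# Example usage (hypothetical file name): the pipeline takes a path to an
# audio file and returns a dict whose "text" field holds the transcript:
#   transcriber("council_meeting.mp3")["text"]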

# LLaMA Model Optimization
LLAMA = "meta-llama/Llama-2-7b-chat-hf"
llama_quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4"
)

tokenizer = AutoTokenizer.from_pretrained(LLAMA)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    LLAMA,
    quantization_config=llama_quant_config if DEVICE == "cuda" else None,  # 4-bit only on GPU
    torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
    device_map="auto"
)
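
# Rough sizing under the 4-bit path: 7B parameters at ~0.5 bytes each is about
# 3.5-4 GB of weights, versus roughly 14 GB for the same model in fp16.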

# Function to Transcribe & Generate Minutes
def process_audio(audio_file):
    if audio_file is None:
        return "Error: No audio provided!"

    # Transcribe audio
    transcript = transcriber(audio_file)["text"]
    
    # Generate meeting minutes
    system_message = "You are an assistant that produces minutes of meetings from transcripts, with a summary, key discussion points, takeaways, and action items with owners, in markdown."
    user_prompt = f"Below is a transcript extract from a Denver council meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]
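
    # apply_chat_template below renders these messages into Llama-2's expected
    # [INST] ... [/INST] prompt format, with the system message in a <<SYS>> block.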

    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    streamer = TextStreamer(tokenizer, skip_prompt=True)  # stream only new tokens to stdout
    outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer)

    # Slice off the prompt so only the generated minutes are returned
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
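
# Quick sanity check without the UI (hypothetical local file):
#   print(process_audio("denver_extract.mp3"))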

# Gradio Interface
interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs="text",
    title="Meeting Minutes Generator",
    description="Upload or record an audio file to get structured meeting minutes in Markdown.",
)

# Launch App
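# launch() serves the UI at http://127.0.0.1:7860 by default; share=True
# would create a temporary public Gradio link instead.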
interface.launch()