import gradio as gr
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, AutoProcessor
from huggingface_hub import login
import os

# Log in with the HF_TOKEN secret stored in the Hugging Face Space (required for the gated Llama model)
token = os.getenv("HF_TOKEN")
login(token=token)


DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Function to Transcribe & Generate Minutes
def process_audio(audio_file):
    if audio_file is None:
        return "Error: No audio provided!"
    
    # Whisper model for speech-to-text (the tiny checkpoint keeps memory usage low)
    asr_model_id = "openai/whisper-tiny"
    processor = AutoProcessor.from_pretrained(asr_model_id)

    transcriber = pipeline(
        "automatic-speech-recognition",
        model=asr_model_id,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        device=0 if torch.cuda.is_available() else "cpu",
    )
    # Transcribe audio (return_timestamps=True lets Whisper handle clips longer than 30 seconds)
    transcript = transcriber(audio_file, return_timestamps=True)["text"]

    # Free the ASR pipeline and processor
    del transcriber
    del processor
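    # Optionally clear the CUDA cache so the freed memory is actually available for the LLM (no-op on CPU)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()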
    # LLaMA model for generating the minutes, loaded in 4-bit to keep GPU memory usage low
    LLAMA = "meta-llama/Llama-3.2-3B-Instruct"
    llama_quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4"
    )
    
    tokenizer = AutoTokenizer.from_pretrained(LLAMA)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        LLAMA,
        # Apply the 4-bit config only on GPU; bitsandbytes quantization requires CUDA
        quantization_config=llama_quant_config if torch.cuda.is_available() else None,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto"
    )
    # Generate meeting minutes
    system_message = "You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown."
    user_prompt = f"Below is an extract transcript of a Denver council meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]

    inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(DEVICE)
    streamer = TextStreamer(tokenizer)
    outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer)

    # Decode only the newly generated tokens so the prompt is not echoed back to the UI
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

# Gradio Interface
interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs="text",
    title="Meeting Minutes Generator",
    description="Upload or record an audio file to get structured meeting minutes in Markdown.",
)

# Launch App
interface.launch()