File size: 965 Bytes
2ed7223
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import transformers
import librosa
import gradio as gr
import spaces

# Load the Shuka v1 multimodal (audio + text) pipeline once at import time
# so each request reuses the same model instance.
# NOTE(review): device=0 assumes a CUDA device is present — confirm for CPU-only hosts.
pipe = transformers.pipeline(
    model='sarvamai/shuka_v1',
    torch_dtype='bfloat16',   # halves memory vs fp32; requires Ampere+ GPU support
    device=0,
    trust_remote_code=True,   # model ships custom pipeline code on the Hub
)

@spaces.GPU(duration=120)
def transcribe_and_respond(audio_file):
    """Run the Shuka v1 model on a recorded audio clip and return its reply.

    Args:
        audio_file: Filesystem path to the recorded audio (Gradio passes a
            filepath because the Audio input uses type="filepath").

    Returns:
        The pipeline's generated response for the spoken input.
    """
    # Shuka expects 16 kHz mono audio; librosa resamples on load.
    audio, sr = librosa.load(audio_file, sr=16000)

    # Bug fix: the user turn must contain the '<|audio|>' placeholder so the
    # chat template splices the audio embedding into the prompt. With an empty
    # string the model never receives the audio content.
    turns = [
        {'role': 'system', 'content': 'Respond naturally and informatively.'},
        {'role': 'user', 'content': '<|audio|>'}
    ]

    response = pipe({'audio': audio, 'turns': turns, 'sampling_rate': sr}, max_new_tokens=512)

    return response

# Build and launch the web UI. Bug fix: Gradio 4.x (required by the `spaces`
# ZeroGPU package) renamed gr.Audio's `source` parameter to `sources`, which
# takes a list — the old keyword raises TypeError on Gradio 4.
iface = gr.Interface(
    fn=transcribe_and_respond,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),  # record from mic, pass a filepath
    outputs="text",  # the model reply is shown as plain text
    title="Voice Input for Transcription and Response",
    description="Record your voice, and the model will respond naturally and informatively."
)

iface.launch()