# Hugging Face Space — running on ZeroGPU ("Zero") hardware.
import transformers
import gradio as gr
import librosa
import torch
import spaces
import numpy as np

# Maintain conversation history globally.
# NOTE(review): this global is never read or written anywhere below —
# history is threaded through the Chatbot component instead. Kept for
# backward compatibility; candidate for removal.
conversation_history = []
# ZeroGPU Spaces only attach a GPU to functions carrying this decorator;
# without it the CUDA pipeline cannot run on "Zero" hardware.
@spaces.GPU
def transcribe_and_respond(audio_file, chat_history):
    """Transcribe the user's audio turn and generate a conversational reply.

    Parameters
    ----------
    audio_file : str
        Filepath of the recorded clip (``gr.Audio(type="filepath")``).
    chat_history : list[dict] | None
        Prior turns as ``{'role', 'content'}`` dicts; ``None`` on the
        first interaction.

    Returns
    -------
    list[dict]
        The updated turn list — a single value for the single Chatbot
        output component. (The original returned a 2-tuple, which Gradio
        cannot map onto one output.)
    """
    try:
        # Build the pipeline once and cache it on the function object:
        # reconstructing it per utterance reloads the model weights.
        if not hasattr(transcribe_and_respond, "_pipe"):
            transcribe_and_respond._pipe = transformers.pipeline(
                model='sarvamai/shuka_v1',
                trust_remote_code=True,
                device=0,
                torch_dtype=torch.bfloat16,
            )
        pipe = transcribe_and_respond._pipe

        # Load the audio at the 16 kHz rate the model expects.
        audio, sr = librosa.load(audio_file, sr=16000)
        # Print audio properties for debugging.
        print(f"Audio dtype: {audio.dtype}, Audio shape: {audio.shape}, Sample rate: {sr}")

        # chat_history is None on the first call; copy so the component's
        # state is never mutated in place.
        turns = list(chat_history or [])
        # The <|audio|> placeholder marks where the clip goes in the prompt.
        turns.append({'role': 'user', 'content': '<|audio|>'})
        print(f"Updated turns: {turns}")

        # Call the model with the updated conversation turns and audio.
        output = pipe({'audio': audio, 'turns': turns, 'sampling_rate': sr}, max_new_tokens=512)
        print(f"Model output: {output}")

        # The model's reply is an 'assistant' turn — 'system' (as in the
        # original) is reserved for top-of-prompt instructions.
        turns.append({'role': 'assistant', 'content': output})
        return turns
    except Exception as e:
        # Surface the error inside the chat instead of replacing the whole
        # history with a bare string (a string breaks the Chatbot output).
        turns = list(chat_history or [])
        turns.append({'role': 'assistant', 'content': f"Error: {str(e)}"})
        return turns
# Chat interface setup: the microphone clip and the running conversation go
# in; the single Chatbot component displays the updated conversation.
iface = gr.Interface(
    fn=transcribe_and_respond,
    inputs=[
        gr.Audio(sources="microphone", type="filepath", label="Your Audio (Microphone)"),
        gr.Chatbot(label="Conversation"),
    ],
    outputs=gr.Chatbot(label="Conversation"),
    # Original title had a mis-decoded emoji ("ποΈ"); restored to 🎙️.
    title="🎙️ AI Chat with Live Transcription",
    description="Talk to the AI through your microphone, and it will respond conversationally based on the ongoing chat. Keep the conversation going!",
    live=True,
    allow_flagging="auto",
)

if __name__ == "__main__":
    # enable_queue= was removed from gr.Interface in Gradio 4; queueing is
    # enabled explicitly before launching instead.
    iface.queue()
    iface.launch()