import gradio as gr
import torch
import numpy as np
from kokoro import KModel, KPipeline

# Check if CUDA is available
CUDA_AVAILABLE = torch.cuda.is_available()

# Initialize the model
model = KModel().to('cuda' if CUDA_AVAILABLE else 'cpu').eval()

# Initialize pipelines for different language codes (using 'a' for English)
pipelines = {'a': KPipeline(lang_code='a', model=False)}

# Custom pronunciation for "kokoro"
pipelines['a'].g2p.lexicon.golds['kokoro'] = 'kˈOkəɹO'


def text_to_audio(text, speed=1.0):
    """Convert text to audio using the Kokoro model."""
    if not text:
        return None

    pipeline = pipelines['a']  # Use the English pipeline
    voice = "af_heart"  # Default voice (US English, female, Heart)

    # Process the text
    pack = pipeline.load_voice(voice)
    for _, ps, _ in pipeline(text, voice, speed):
        ref_s = pack[len(ps) - 1]

        # Generate audio
        try:
            audio = model(ps, ref_s, speed)
        except Exception as e:
            raise gr.Error(f"Error generating audio: {str(e)}")

        # Return the audio with a 24 kHz sample rate
        return 24000, audio.numpy()

    return None


# Create the Gradio interface
with gr.Blocks(title="Kokoro Text-to-Audio") as app:
    gr.Markdown("# 🎵 Kokoro Text-to-Audio Converter")
    gr.Markdown("Convert text to speech using the Kokoro-82M model")

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Enter your text",
                placeholder="Type something to convert to audio...",
                lines=5
            )
            speed_slider = gr.Slider(
                minimum=0.5,
                maximum=2.0,
                value=1.0,
                step=0.1,
                label="Speech Speed"
            )
            submit_btn = gr.Button("Generate Audio")

        with gr.Column():
            audio_output = gr.Audio(label="Generated Audio", type="numpy")

    submit_btn.click(
        fn=text_to_audio,
        inputs=[text_input, speed_slider],
        outputs=[audio_output]
    )

    gr.Markdown("### Usage Tips")
    gr.Markdown("- For best results, keep your text reasonably short (up to ~500 characters)")
    gr.Markdown("- Adjust the speed slider to modify the pace of speech")
    gr.Markdown("- The model may take a moment to load on first use")

# Launch the app
if __name__ == "__main__":
    app.launch()