import sys
import os
import time
import math
import torch
import spaces
# By using XTTS you agree to CPML license https://coqui.ai/cpml
os.environ["COQUI_TOS_AGREED"] = "1"

import gradio as gr
from TTS.api import TTS
from TTS.utils.manage import ModelManager
model_names = TTS().list_models()
print(model_names.__dict__)
print(model_names.__dir__())
model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
m = model_name

# Automatic device detection
if torch.cuda.is_available():
    # cuda only
    device_type = "cuda"
    device_selection = "cuda:0"
    data_type = torch.float16
else:
    # no GPU or Amd
    device_type = "cpu"
    device_selection = "cpu"
    data_type = torch.float32

tts = TTS(model_name, gpu=torch.cuda.is_available())
tts.to(device_type)

def predict(prompt, language, gender, audio_file_pth, mic_file_path, use_mic):
    start = time.time()
    if len(prompt) < 2:
        gr.Warning("Please give a longer prompt text")
        return (
                None,
                None,
                None,
            )
    if 50000 < len(prompt):
        gr.Warning("Text length limited to 50,000 characters for this demo, please try shorter text")
        return (
            None,
            None,
            None,
        )

    if use_mic:
        if mic_file_path is None:
            gr.Warning("Please record your voice with Microphone, or uncheck Use Microphone to use reference audios")
            return (
                None,
                None,
                None,
            )
        else:
            speaker_wav = mic_file_path
    else:
        speaker_wav = audio_file_pth

    if speaker_wav is None:
        if gender == "male":
            speaker_wav = "examples/female.wav"
        else:
            speaker_wav = "examples/female.wav"
        
    try:
        if language == "fr":
            if m.find("your") != -1:
                language = "fr-fr"
        if m.find("/fr/") != -1:
            language = None
        predict_on_gpu(prompt, speaker_wav, language)
    except RuntimeError as e :
        if "device-assert" in str(e):
            # cannot do anything on cuda device side error, need to restart
            gr.Warning("Unhandled Exception encounter, please retry in a minute")
            print("Cuda device-assert Runtime encountered need restart")
            sys.exit("Exit due to cuda device-assert")
        else:
            raise e
        
    end = time.time()
    secondes = int(end - start)
    minutes = math.floor(secondes / 60)
    secondes = secondes - (minutes * 60)
    hours = math.floor(minutes / 60)
    minutes = minutes - (hours * 60)
    is_randomize_seed = False
    information = ("Start again to get a different result. " if is_randomize_seed else "") + "The sound has been generated in " + ((str(hours) + " h, ") if hours != 0 else "") + ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + str(secondes) + " sec."

    return (
        gr.make_waveform(
            audio="output.wav",
        ),
        "output.wav",
        information,
    )

@spaces.GPU(duration=60)
def predict_on_gpu(prompt, speaker_wav, language):
    tts.tts_to_file(
        text=prompt,
        file_path="output.wav",
        speaker_wav=speaker_wav,
        language=language
    )

with gr.Blocks() as interface:
    gr.HTML("Multi-language Text-to-Speech")
    gr.HTML(
        """
<a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a Voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip. 
<br/>
XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible. 
<br/>
This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
<br/>
Leave a star on the Github <a href="https://github.com/coqui-ai/TTS">TTS</a>, where our open-source inference and training code lives.
<br/>
<p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
<br/>
<a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
        """
    )
    with gr.Column():
        prompt = gr.Textbox(
            label="Text Prompt",
            info="One or two sentences at a time is better",
            value="Hello, World! Here is an example of light voice cloning. Try to upload your best audio samples quality",
        )
        language = gr.Dropdown(
            label="Language",
            info="Select an output language for the synthesised speech",
            choices=[
                    ["Arabic", "ar"],
                    ["Brazilian Portuguese", "pt"],
                    ["Mandarin Chinese", "zh-cn"],
                    ["Czech", "cs"],
                    ["Dutch", "nl"],
                    ["English", "en"],
                    ["French", "fr"],
                    ["German", "de"],
                    ["Italian", "it"],
                    ["Polish", "pl"],
                    ["Russian", "ru"],
                    ["Spanish", "es"],
                    ["Turkish", "tr"]
            ],
            max_choices=1,
            value="en",
        )
        gender = gr.Radio(["female", "male"], label="Gender", info="Gender of the voice")
        audio_file_pth = gr.Audio(
            label="Reference Audio",
            #info="Click on the ✎ button to upload your own target speaker audio",
            type="filepath",
            value=None,
        )
        mic_file_path = gr.Audio(sources=["microphone"],
                 type="filepath",
                 #info="Use your microphone to record audio",
                 label="Use Microphone for Reference")
        use_mic = gr.Checkbox(label="Check to use Microphone as Reference",
                    value=False,
                    info="Notice: Microphone input may not work properly under traffic",)
        with gr.Accordion("Advanced options", open = False):
             debug_mode = gr.Checkbox(label = "Debug mode", value = False, info = "Show intermediate results")

        submit = gr.Button("🚀 Speak", variant = "primary")

        waveform_visual = gr.Video(label="Waveform Visual", autoplay=True)
        synthesised_audio = gr.Audio(label="Synthesised Audio", autoplay=False)
        information = gr.HTML()

    submit.click(predict, inputs = [
        prompt, language, gender, audio_file_pth, mic_file_path, use_mic
    ], outputs = [
        waveform_visual,
        synthesised_audio,
        information
    ], scroll_to_output = True)

interface.queue().launch(debug=True)