Sync from GitHub repo

This Space is synced from the GitHub repo: https://github.com/SWivid/F5-TTS. Please submit contributions there.
app.py CHANGED

```diff
@@ -454,13 +454,31 @@ Have a conversation with an AI using your reference voice!
 """
     )
 
-    load_chat_model_btn = gr.Button("Load Chat Model", variant="primary")
-
-    chat_interface_container = gr.Column(visible=False)
-
-    @gpu_decorator
-    def load_chat_model():
-        global chat_model_state, chat_tokenizer_state
+    if not USING_SPACES:
+        load_chat_model_btn = gr.Button("Load Chat Model", variant="primary")
+
+        chat_interface_container = gr.Column(visible=False)
+
+        @gpu_decorator
+        def load_chat_model():
+            global chat_model_state, chat_tokenizer_state
+            if chat_model_state is None:
+                show_info = gr.Info
+                show_info("Loading chat model...")
+                model_name = "Qwen/Qwen2.5-3B-Instruct"
+                chat_model_state = AutoModelForCausalLM.from_pretrained(
+                    model_name, torch_dtype="auto", device_map="auto"
+                )
+                chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
+                show_info("Chat model loaded.")
+
+            return gr.update(visible=False), gr.update(visible=True)
+
+        load_chat_model_btn.click(load_chat_model, outputs=[load_chat_model_btn, chat_interface_container])
+
+    else:
+        chat_interface_container = gr.Column()
+
         if chat_model_state is None:
             show_info = gr.Info
             show_info("Loading chat model...")
@@ -469,10 +487,6 @@ Have a conversation with an AI using your reference voice!
             chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
             show_info("Chat model loaded.")
 
-        return gr.update(visible=False), gr.update(visible=True)
-
-    load_chat_model_btn.click(load_chat_model, outputs=[load_chat_model_btn, chat_interface_container])
-
     with chat_interface_container:
         with gr.Row():
             with gr.Column():
```
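In short, the commit makes chat-model loading conditional on where the app runs. Locally (`not USING_SPACES`), a "Load Chat Model" button lazily downloads Qwen2.5-3B-Instruct and then reveals the chat UI; on Hugging Face Spaces, the container is created visible and the model is loaded eagerly at startup. Below is a minimal standalone sketch of that lazy-load pattern; it mirrors the names in the diff (`chat_model_state`, `load_chat_model`) but omits the app-specific `gpu_decorator` and the surrounding F5-TTS layout, so treat it as an illustration rather than the actual app.py.

```python
# Minimal sketch of the lazy-load pattern in this commit (standalone demo,
# not the full F5-TTS app). Assumes gradio and transformers are installed.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

chat_model_state = None
chat_tokenizer_state = None

with gr.Blocks() as demo:
    load_chat_model_btn = gr.Button("Load Chat Model", variant="primary")
    # Hidden until the model is ready, matching the diff's local-run branch.
    chat_interface_container = gr.Column(visible=False)

    def load_chat_model():
        global chat_model_state, chat_tokenizer_state
        if chat_model_state is None:
            gr.Info("Loading chat model...")
            model_name = "Qwen/Qwen2.5-3B-Instruct"
            chat_model_state = AutoModelForCausalLM.from_pretrained(
                model_name, torch_dtype="auto", device_map="auto"
            )
            chat_tokenizer_state = AutoTokenizer.from_pretrained(model_name)
            gr.Info("Chat model loaded.")
        # Hide the button and reveal the chat UI once the model is ready.
        return gr.update(visible=False), gr.update(visible=True)

    load_chat_model_btn.click(
        load_chat_model,
        outputs=[load_chat_model_btn, chat_interface_container],
    )

    with chat_interface_container:
        # Placeholder: the real app builds rows of chat widgets here.
        gr.Markdown("Chat model is loaded; chat widgets would go here.")

if __name__ == "__main__":
    demo.launch()
```

The design point is cost deferral: a 3B-parameter model means several gigabytes of download and VRAM, so a local user who never opens the voice-chat tab never pays for it, while a Space (whose hardware is provisioned anyway) loads the model up front and skips the extra click.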