import tempfile
import logging
import os
import asyncio
from moviepy.editor import *
import edge_tts
import gradio as gr
from pydub import AudioSegment

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
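
# Example of a log line this configuration produces (timestamp and message are illustrative):
#   2024-01-01 12:00:00,000 - INFO - Starting processing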

# Static assets bundled with the app.
INTRO_VIDEO = "introvideo.mp4"
OUTRO_VIDEO = "outrovideo.mp4"
MUSIC_BG = "musicafondo.mp3"
EJEMPLO_VIDEO = "ejemplo.mp4"

# Fail fast if any required asset is missing.
for file in [INTRO_VIDEO, OUTRO_VIDEO, MUSIC_BG, EJEMPLO_VIDEO]:
    if not os.path.exists(file):
        logging.error(f"Missing required file: {file}")
        raise FileNotFoundError(f"Missing: {file}")

# Processing parameters.
CHUNK_SIZE = 60
MAX_CHUNKS = 50
SEGMENT_DURATION = 18  # seconds per video segment
OVERLAP = 2            # seconds of overlap between consecutive segments
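
# Illustrative only (values assumed, not from the original code): with
# SEGMENT_DURATION = 18 and OVERLAP = 2, each new segment starts 16 s after the
# previous one, so a 60-second input yields segments starting at 0 s, 16 s and
# 32 s; the cutting loop in procesar_video below drops the partial tail.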


def eliminar_archivo_tiempo(ruta, delay=1800):
    """Schedule deletion of the file at `ruta` after `delay` seconds (default 30 minutes)."""
    def eliminar():
        try:
            if os.path.exists(ruta):
                os.remove(ruta)
                logging.info(f"File deleted: {ruta}")
        except Exception as e:
            logging.error(f"Error deleting {ruta}: {e}")
    from threading import Timer
    Timer(delay, eliminar).start()
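
# Usage sketch (the path is hypothetical, not part of the original flow):
#   eliminar_archivo_tiempo("/tmp/output.mp4", delay=1800)
# starts a background Timer that removes the rendered file 30 minutes after it
# has been served.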


async def generar_tts(texto, voz, duracion_total):
    """Generate TTS narration with edge-tts and cap it at the video duration."""
    try:
        logging.info("Generating TTS")
        communicate = edge_tts.Communicate(texto, voz)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_tts:
            await communicate.save(tmp_tts.name)
            tts_audio = AudioFileClip(tmp_tts.name)

            # Never let the narration run past the end of the video.
            if tts_audio.duration > duracion_total:
                tts_audio = tts_audio.subclip(0, duracion_total)

            return tts_audio, tmp_tts.name
    except Exception as e:
        logging.error(f"TTS failed: {e}")
        raise
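
# Note: `voz` is an edge-tts short voice name; "es-ES-AlvaroNeural" or
# "es-MX-JorgeNeural" are examples of valid values (assumed here, not taken
# from the original code).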


def crear_musica_fondo(duracion_total):
    """Loop the background track to cover `duracion_total` seconds, ending with a fade-out."""
    bg_music = AudioSegment.from_mp3(MUSIC_BG)
    needed_ms = int(duracion_total * 1000)
    # Repeat the track enough times to cover the whole video, then trim and fade.
    repeticiones = needed_ms // len(bg_music) + 1
    bg_music = bg_music * repeticiones
    bg_music = bg_music[:needed_ms].fade_out(1000)

    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_bg:
        bg_music.export(tmp_bg.name, format="mp3")
        return AudioFileClip(tmp_bg.name).volumex(0.15), tmp_bg.name
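
# Worked example (illustrative values): for a 60 s video (needed_ms = 60000) and a
# 3-minute track (len(bg_music) = 180000 ms), repeticiones = 60000 // 180000 + 1 = 1,
# so the track plays once and is trimmed to the first 60 s before the 1 s fade-out.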


async def procesar_video(video_input, texto_tts, voz_seleccionada):
    """Mix TTS narration and background music over the uploaded video and cut it into segments."""
    temp_files = []
    intro, outro, video_original = None, None, None
    try:
        logging.info("Starting processing")
        video_original = VideoFileClip(video_input, target_resolution=(720, 1280))
        duracion_video = video_original.duration

        # Narration and background music sized to the input video.
        tts_audio, tts_path = await generar_tts(texto_tts, voz_seleccionada, duracion_video)
        bg_audio, bg_path = crear_musica_fondo(duracion_video)
        temp_files.extend([tts_path, bg_path])

        # Build the audio layers: background music, original audio (if any), TTS narration.
        audio_original = video_original.audio.volumex(0.7) if video_original.audio else None
        audios = [bg_audio.set_duration(duracion_video)]
        if audio_original:
            audios.append(audio_original)
        audios.append(tts_audio.set_start(0).volumex(0.85))
        audio_final = CompositeAudioClip(audios).set_duration(duracion_video)
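
        # CompositeAudioClip plays all tracks in parallel, so the per-track volumes set
        # above (0.15 background, 0.7 original audio, 0.85 TTS) determine the final balance.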

        # Cut the source video into fixed-length segments that overlap their predecessor.
        segments = []
        current_time = 0
        while current_time < duracion_video:
            end_time = current_time + SEGMENT_DURATION
            if end_time > duracion_video:
                break

            full_segment = video_original.subclip(current_time, end_time)
            if segments and full_segment.duration >= OVERLAP:
                # Trim the trailing overlap from every segment after the first.
                full_segment = full_segment.subclip(0, full_segment.duration - OVERLAP)

            segments.append(full_segment)
            current_time += (SEGMENT_DURATION - OVERLAP)