import tempfile
import logging
import os

import edge_tts
import gradio as gr
from moviepy.editor import (
    VideoFileClip, AudioFileClip, CompositeVideoClip,
    CompositeAudioClip, concatenate_videoclips, vfx,
)
from pydub import AudioSegment
# Logging configuration
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# ASSET FILE CONSTANTS
INTRO_VIDEO = "introvideo.mp4"
OUTRO_VIDEO = "outrovideo.mp4"
MUSIC_BG = "musicafondo.mp3"
EJEMPLO_VIDEO = "ejemplo.mp4"
# Validate that all required asset files exist
for file in [INTRO_VIDEO, OUTRO_VIDEO, MUSIC_BG, EJEMPLO_VIDEO]:
    if not os.path.exists(file):
        logging.error(f"Missing required file: {file}")
        raise FileNotFoundError(f"Missing: {file}")
# Chunking configuration
CHUNK_SIZE = 60  # 1 minute per chunk (currently not referenced below)
SEGMENT_DURATION = 18  # Base duration of each segment, in seconds
TRANSITION_DURATION = 1.5  # Duration of the slide effect, in seconds
OVERLAP = TRANSITION_DURATION  # Segments overlap by exactly the transition duration
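# Consecutive segments overlap by TRANSITION_DURATION, so the slide transition
# blends the same stretch of footage from both segments and no content is lost
# or duplicated at the cut points.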
def eliminar_archivo_tiempo(ruta, delay=1800):
    def eliminar():
        try:
            if os.path.exists(ruta):
                os.remove(ruta)
                logging.info(f"File deleted: {ruta}")
        except Exception as e:
            logging.error(f"Error deleting {ruta}: {e}")
from threading import Timer
Timer(delay, eliminar).start()
async def generar_tts(texto, voz, duracion_total):
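    """Generate TTS audio for `texto` with edge-tts, trimmed to at most
    `duracion_total` seconds. Returns (AudioFileClip, temp mp3 path)."""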
try:
        logging.info("Generating TTS audio")
communicate = edge_tts.Communicate(texto, voz)
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_tts:
await communicate.save(tmp_tts.name)
tts_audio = AudioFileClip(tmp_tts.name)
        # Make sure the TTS track does not exceed the video duration
        if tts_audio.duration > duracion_total:
            tts_audio = tts_audio.subclip(0, duracion_total)
return tts_audio, tmp_tts.name
except Exception as e:
        logging.error(f"TTS failed: {str(e)}")
raise
def crear_musica_fondo(duracion_total):
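    """Loop the background track until it covers `duracion_total` seconds, fade it
    out, and return (AudioFileClip at 15% volume, temp mp3 path)."""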
bg_music = AudioSegment.from_mp3(MUSIC_BG)
needed_ms = int(duracion_total * 1000)
repeticiones = needed_ms // len(bg_music) + 1
bg_music = bg_music * repeticiones
bg_music = bg_music[:needed_ms].fade_out(1000)
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_bg:
bg_music.export(tmp_bg.name, format="mp3")
return AudioFileClip(tmp_bg.name).volumex(0.15), tmp_bg.name
def create_slide_transition(clip1, clip2, duration=TRANSITION_DURATION):
    """Slide transition with a pronounced upward motion."""
    # Take the last part of clip1 and the first part of clip2
part1 = clip1.subclip(clip1.duration - duration)
part2 = clip2.subclip(0, duration)
    # Build the slide animation
transition = CompositeVideoClip([
part1.fx(vfx.fadeout, duration),
part2.fx(vfx.fadein, duration).set_position(
            lambda t: ('center', 720 - (720 * (t/duration)))  # Slides in from the bottom
)
], size=(1280, 720)).set_duration(duration)
return transition
async def procesar_video(video_input, texto_tts, voz_seleccionada):
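    """Full pipeline: mix TTS and background music into the uploaded video, rebuild
    the timeline with slide transitions, add intro/outro, and render a temporary MP4."""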
temp_files = []
intro, outro, video_original = None, None, None
try:
        logging.info("Starting processing")
video_original = VideoFileClip(video_input, target_resolution=(720, 1280))
duracion_video = video_original.duration
        # Generate the TTS track and the background music
tts_audio, tts_path = await generar_tts(texto_tts, voz_seleccionada, duracion_video)
bg_audio, bg_path = crear_musica_fondo(duracion_video)
temp_files.extend([tts_path, bg_path])
        # Mix the audio tracks
audio_original = video_original.audio.volumex(0.7) if video_original.audio else None
audios = [bg_audio.set_duration(duracion_video)]
if audio_original:
audios.append(audio_original)
audios.append(tts_audio.set_start(0).volumex(0.85))
audio_final = CompositeAudioClip(audios).set_duration(duracion_video)
        # Split the video into segments with a controlled overlap
segments = []
current_time = 0
while current_time < duracion_video:
end_time = current_time + SEGMENT_DURATION
if end_time > duracion_video:
end_time = duracion_video
segment = video_original.subclip(current_time, end_time)
segments.append(segment)
            current_time += (SEGMENT_DURATION - OVERLAP)  # Advance while accounting for the overlap
        # Build the video with slide transitions between consecutive segments
        clips = []
        for i in range(len(segments)):
            if i == 0:
                clips.append(segments[i])
            elif segments[i].duration <= TRANSITION_DURATION:
                # This short tail is already fully contained in the previous segment
                # (they overlap by TRANSITION_DURATION), so skip it to avoid duplicated footage
                continue
            else:
                # Build the transition from the end of the previous clip and the start of the current one
                transition = create_slide_transition(clips[-1], segments[i])
                # Keep the previous clip minus the part consumed by the transition,
                # instead of discarding it entirely
                previo = clips.pop()
                clips.append(previo.subclip(0, previo.duration - TRANSITION_DURATION))
                clips.append(transition)
                clips.append(segments[i].subclip(TRANSITION_DURATION))  # Drop the part already used in the transition
        # Concatenate everything and attach the mixed audio
video_final = concatenate_videoclips(clips, method="compose").set_audio(audio_final)
        # Add the intro and outro
intro = VideoFileClip(INTRO_VIDEO, target_resolution=(720, 1280))
outro = VideoFileClip(OUTRO_VIDEO, target_resolution=(720, 1280))
video_final = concatenate_videoclips([intro, video_final, outro], method="compose")
        # Final render
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
video_final.write_videofile(
tmp.name,
codec="libx264",
audio_codec="aac",
fps=24,
threads=2,
bitrate="3M",
ffmpeg_params=[
"-preset", "ultrafast",
"-crf", "28",
"-movflags", "+faststart",
"-vf", "scale=1280:720"
],
verbose=False
)
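            # Schedule the rendered file for automatic deletion after 30 minutes (1800 s)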
            eliminar_archivo_tiempo(tmp.name, 1800)
            logging.info(f"Final video saved: {tmp.name}")
            return tmp.name
except Exception as e:
        logging.error(f"General failure: {str(e)}")
raise
finally:
try:
if video_original:
video_original.close()
if intro:
intro.close()
if outro:
outro.close()
for file in temp_files:
try:
os.remove(file)
except Exception as e:
                    logging.warning(f"Error cleaning up {file}: {e}")
except Exception as e:
            logging.warning(f"Error closing resources: {str(e)}")
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Editor")
    with gr.Tab("Main"):
        video_input = gr.Video(label="Upload video")
        texto_tts = gr.Textbox(
            label="Text for TTS",
            lines=3,
            placeholder="Type your text here..."
        )
        voz_seleccionada = gr.Dropdown(
            label="Voice",
            choices=["es-ES-AlvaroNeural", "es-MX-BeatrizNeural"],
            value="es-ES-AlvaroNeural"
        )
        procesar_btn = gr.Button("Generate Video")
        video_output = gr.Video(label="Processed Video")
    with gr.Accordion("Usage Examples", open=False):
        gr.Examples(
            examples=[[EJEMPLO_VIDEO, "¡Hola! Esto es una prueba. Suscríbete al canal."]],
            inputs=[video_input, texto_tts],
            label="Examples"
        )
procesar_btn.click(
procesar_video,
inputs=[video_input, texto_tts, voz_seleccionada],
outputs=video_output
)
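    # Gradio supports async event handlers, so the procesar_video coroutine is awaited automatically.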
if __name__ == "__main__":
demo.queue().launch()