import tempfile
import logging
import os
import asyncio
from threading import Timer

import numpy as np
from moviepy.editor import (
    VideoFileClip,
    AudioFileClip,
    CompositeAudioClip,
    concatenate_audioclips,
    concatenate_videoclips,
)
import edge_tts
import gradio as gr
from pydub import AudioSegment

# Logging configuration
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Asset file constants
INTRO_VIDEO = "introvideo.mp4"
OUTRO_VIDEO = "outrovideo.mp4"
MUSIC_BG = "musicafondo.mp3"
GLITCH_SOUND = "fxsound.mp3"
EJEMPLO_VIDEO = "ejemplo.mp4"

# Validate that every required asset exists before starting
for file in [INTRO_VIDEO, OUTRO_VIDEO, MUSIC_BG, GLITCH_SOUND, EJEMPLO_VIDEO]:
    if not os.path.exists(file):
        logging.error(f"Falta archivo necesario: {file}")
        raise FileNotFoundError(f"Falta: {file}")

# Chunking configuration
CHUNK_SIZE = 60   # one minute of video per chunk
MAX_CHUNKS = 50   # hard cap on chunks processed per request
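# At 60 s per chunk and at most 50 chunks, a single request covers up to
# 50 minutes of input video.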
def eliminar_archivo_tiempo(ruta, delay=1800):
    """Schedule deletion of the file at `ruta` after `delay` seconds."""
    def eliminar():
        try:
            if os.path.exists(ruta):
                os.remove(ruta)
                logging.info(f"Archivo eliminado: {ruta}")
        except Exception as e:
            logging.error(f"Error al eliminar {ruta}: {e}")
    Timer(delay, eliminar).start()
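# Usage sketch (the path shown is illustrative only):
#   eliminar_archivo_tiempo("/tmp/out.mp4", delay=60)  # delete after 1 minute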
async def procesar_audio(texto, voz, duracion_video, audio_original=None):
    """Build the final audio track: TTS speech over looped background music,
    optionally mixed with the original clip audio."""
    temp_files = []
    try:
        logging.info("Iniciando procesamiento de audio")
        if not texto.strip():
            raise ValueError("El texto para TTS no puede estar vacío.")

        # Keep each edge-tts request small by splitting long texts
        def dividir_texto(texto, max_length=2000):
            return [texto[i:i + max_length] for i in range(0, len(texto), max_length)]

        fragmentos = dividir_texto(texto)
        audios_tts = []
        for fragmento in fragmentos:
            communicate = edge_tts.Communicate(fragmento, voz)
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_tts:
                await communicate.save(tmp_tts.name)
                tts_audio = AudioFileClip(tmp_tts.name)
                temp_files.append(tmp_tts.name)
                audios_tts.append(tts_audio)

        tts_audio_final = concatenate_audioclips(audios_tts)
        if tts_audio_final.duration > duracion_video:
            tts_audio_final = tts_audio_final.subclip(0, duracion_video)

        # Loop the background music until it covers the video, then fade out
        needed_ms = int(duracion_video * 1000)
        bg_music = AudioSegment.from_mp3(MUSIC_BG)
        repeticiones = needed_ms // len(bg_music) + 1
        bg_music = bg_music * repeticiones
        bg_music = bg_music[:needed_ms].fade_out(1000)

        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_bg:
            bg_music.export(tmp_bg.name, format="mp3")
            bg_audio = AudioFileClip(tmp_bg.name).volumex(0.15)
            temp_files.append(tmp_bg.name)

        # Mix: quiet music bed + (optional) original audio + TTS on top
        audios = [bg_audio.set_duration(duracion_video)]
        if audio_original:
            audios.append(audio_original.volumex(0.7))
        audios.append(tts_audio_final.volumex(0.85).set_start(0))

        audio_final = CompositeAudioClip(audios).set_duration(duracion_video)
        logging.info("Audio procesado exitosamente")
        return audio_final, temp_files  # caller is responsible for cleaning up temp files
    except Exception as e:
        logging.error(f"Fallo en procesamiento de audio: {str(e)}")
        raise
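# Standalone sketch of the audio pipeline (assumes a short local "clip.mp4";
# the file name is illustrative, not part of this app):
#
#   async def _demo_audio():
#       video = VideoFileClip("clip.mp4")
#       audio, tmp = await procesar_audio(
#           "Hola mundo", "es-ES-AlvaroNeural", video.duration, video.audio
#       )
#       video.set_audio(audio).write_videofile(
#           "demo.mp4", codec="libx264", audio_codec="aac"
#       )
#
#   asyncio.run(_demo_audio())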
def aplicar_glitch(video_clip):
    """Apply a vertical row-shift "glitch" distortion to every frame."""
    def glitch_effect(frame):
        frame = frame.copy()
        height, width, _ = frame.shape
        offset = np.random.randint(3, 8)
        if height > offset:
            # Shift the lower rows upward to create a visible tear
            frame[offset:, :] = np.roll(frame[:-offset, :], -offset, axis=0)
        return frame
    return video_clip.fl_image(glitch_effect)
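# Quick way to preview the distortion in isolation (sketch; "clip.mp4" is a
# placeholder for any short local video):
#
#   aplicar_glitch(VideoFileClip("clip.mp4")).write_videofile(
#       "glitch_preview.mp4", codec="libx264", audio_codec="aac"
#   )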
async def procesar_fragmento(chunk, texto_tts, voz_seleccionada, start_time):
    """Rebuild one chunk: regenerate its audio track and splice a short glitch
    transition into the start of each 18 s segment. `start_time` is the
    chunk's offset within the full video."""
    temp_files = []
    try:
        audio_original = chunk.audio
        duracion_chunk = chunk.duration

        audio_final, temp_files = await procesar_audio(
            texto_tts,
            voz_seleccionada,
            duracion_chunk,
            audio_original
        )

        # Chunks shorter than one segment need no glitch transitions
        if duracion_chunk <= 18:
            return chunk.set_audio(audio_final), temp_files

        segment_duration = 18
        overlap = 2  # each segment re-shows the last 2 s of the previous one
        segments = []
        glitch_clips = []
        glitch_sound = AudioFileClip(GLITCH_SOUND).volumex(0.5)

        current_time = 0
        while current_time < duracion_chunk:
            end_time = min(current_time + segment_duration, duracion_chunk)
            full_segment = chunk.subclip(current_time, end_time)

            if current_time > 0 and full_segment.duration >= 0.5:
                # Distort the first half second of the segment and schedule
                # the glitch sound effect at the same point (chunk-local time)
                glitch_part = aplicar_glitch(full_segment.subclip(0, 0.5))
                processed_segment = concatenate_videoclips([
                    glitch_part,
                    full_segment.subclip(0.5)
                ], method="compose")
                glitch_clips.append(glitch_sound.set_start(current_time))
            else:
                processed_segment = full_segment

            segments.append(processed_segment)
            current_time += (segment_duration - overlap)

        video_chunk = concatenate_videoclips(segments, method="compose")
        # Mix the scheduled glitch sound effects into the regenerated audio
        if glitch_clips:
            audio_final = CompositeAudioClip(
                [audio_final] + glitch_clips
            ).set_duration(video_chunk.duration)
        video_chunk = video_chunk.set_audio(audio_final)
        return video_chunk, temp_files
    except Exception as e:
        logging.error(f"Fallo procesando fragmento: {str(e)}")
        raise
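# Worked example: a 60 s chunk yields segments starting at 0, 16, 32 and 48 s
# (18 s each except the 12 s tail), so the processed chunk runs about 66 s,
# since each 2 s overlap is shown twice around a glitch cut.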
async def procesar_video(video_input, texto_tts, voz_seleccionada):
    """Full pipeline: chunk the input video, process each chunk, add the intro
    and outro, then render the final MP4."""
    temp_files = []
    video_original = intro = outro = None
    try:
        logging.info("Iniciando procesamiento de video")
        video_original = VideoFileClip(video_input, target_resolution=(720, 1280))
        total_duration = video_original.duration

        # Split into chunks, keeping the fractional tail of the last chunk;
        # MAX_CHUNKS caps how much video is processed
        chunks = []
        start = 0
        while start < total_duration and len(chunks) < MAX_CHUNKS:
            end = min(start + CHUNK_SIZE, total_duration)
            chunks.append((start, video_original.subclip(start, end)))
            start += CHUNK_SIZE

        # Process each chunk
        processed_clips = []
        for i, (start_time, chunk) in enumerate(chunks):
            logging.info(f"Procesando chunk {i+1}/{len(chunks)}")
            processed_chunk, chunk_temp_files = await procesar_fragmento(
                chunk, texto_tts, voz_seleccionada, start_time
            )
            processed_clips.append(processed_chunk)
            temp_files.extend(chunk_temp_files)

        # Reassemble the chunks
        final_video = concatenate_videoclips(processed_clips, method="compose")

        # Add intro and outro
        intro = VideoFileClip(INTRO_VIDEO, target_resolution=(720, 1280))
        outro = VideoFileClip(OUTRO_VIDEO, target_resolution=(720, 1280))
        final_video = concatenate_videoclips([intro, final_video, outro], method="compose")

        # Final render
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            final_video.write_videofile(
                tmp.name,
                codec="libx264",
                audio_codec="aac",
                fps=24,
                threads=2,
                bitrate="3M",
                ffmpeg_params=[
                    "-preset", "ultrafast",
                    "-crf", "28",
                    "-movflags", "+faststart",
                    "-vf", "scale=1280:720"
                ],
                verbose=False
            )
            # Schedule cleanup of the rendered file after 30 minutes
            eliminar_archivo_tiempo(tmp.name, 1800)
            logging.info(f"Video final guardado: {tmp.name}")
            return tmp.name
    except Exception as e:
        logging.error(f"Fallo general: {str(e)}")
        raise
    finally:
        # Close clips and delete temp files only after all processing is done
        for clip in (video_original, intro, outro):
            try:
                if clip is not None:
                    clip.close()
            except Exception as e:
                logging.warning(f"Error al cerrar recursos: {str(e)}")
        for file in temp_files:
            try:
                os.remove(file)
            except Exception as e:
                logging.warning(f"Error limpiando {file}: {e}")
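# The pipeline can also be run headless, without the UI below (sketch;
# "entrada.mp4" is a placeholder path):
#
#   ruta = asyncio.run(procesar_video("entrada.mp4", "Texto de prueba",
#                                     "es-ES-AlvaroNeural"))
#   print(ruta)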
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Editor de Video con IA")
    with gr.Tab("Principal"):
        video_input = gr.Video(label="Subir video")
        texto_tts = gr.Textbox(
            label="Texto para TTS",
            lines=3,
            placeholder="Escribe aquí tu texto..."
        )
        voz_seleccionada = gr.Dropdown(
            label="Voz",
            choices=["es-ES-AlvaroNeural", "es-MX-BeatrizNeural"],
            value="es-ES-AlvaroNeural"
        )
        procesar_btn = gr.Button("Generar Video")
        video_output = gr.Video(label="Video Procesado")
        with gr.Accordion("Ejemplos de Uso", open=False):
            gr.Examples(
                examples=[[EJEMPLO_VIDEO, "¡Hola! Esto es una prueba. Suscríbete al canal."]],
                inputs=[video_input, texto_tts],
                label="Ejemplos"
            )
    procesar_btn.click(
        procesar_video,
        inputs=[video_input, texto_tts, voz_seleccionada],
        outputs=video_output
    )

if __name__ == "__main__":
    demo.queue().launch()  # queue requests so long renders are handled in turn