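"""Gradio app that adds TTS narration, background music, glitch transitions and intro/outro clips to an uploaded video."""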
import tempfile
import logging
import os
import asyncio
from threading import Timer

import numpy as np
from moviepy.editor import *
import edge_tts
import gradio as gr
from pydub import AudioSegment

# Logging configuration
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Required asset files
INTRO_VIDEO = "introvideo.mp4"
OUTRO_VIDEO = "outrovideo.mp4"
MUSIC_BG = "musicafondo.mp3"
GLITCH_SOUND = "fxsound.mp3"
EJEMPLO_VIDEO = "ejemplo.mp4"

# Fail fast if any required asset is missing
for file in [INTRO_VIDEO, OUTRO_VIDEO, MUSIC_BG, GLITCH_SOUND, EJEMPLO_VIDEO]:
    if not os.path.exists(file):
        logging.error(f"Falta archivo necesario: {file}")
        raise FileNotFoundError(f"Falta: {file}")

def eliminar_archivo_tiempo(ruta, delay=1800):
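    """Schedule deletion of the file at `ruta` after `delay` seconds (default 30 minutes)."""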
    def eliminar():
        try:
            if os.path.exists(ruta):
                os.remove(ruta)
                logging.info(f"Archivo eliminado: {ruta}")
        except Exception as e:
            logging.error(f"Error al eliminar {ruta}: {e}")
    Timer(delay, eliminar).start()

async def procesar_audio(texto, voz, duracion_video, audio_original):
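    """Build the audio track for the video.

    Generates TTS narration with edge-tts, loops the background music to the
    video duration and mixes it (quietly) with the original audio, if present.
    """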
    temp_files = []
    try:
        if not texto.strip():
            raise ValueError("El texto para TTS no puede estar vacío.")

        # Split the text into chunks that edge-tts can handle
        def dividir_texto(texto, max_length=3000):
            return [texto[i:i + max_length] for i in range(0, len(texto), max_length)]

        fragmentos = dividir_texto(texto)
        audios_tts = []
        for fragmento in fragmentos:
            communicate = edge_tts.Communicate(fragmento, voz)
            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_tts:
                await communicate.save(tmp_tts.name)
            tts_audio = AudioFileClip(tmp_tts.name)
            temp_files.append(tmp_tts.name)
            audios_tts.append(tts_audio)

        tts_audio_final = concatenate_audioclips(audios_tts)
        if tts_audio_final.duration > duracion_video:
            tts_audio_final = tts_audio_final.subclip(0, duracion_video)

        # Loop the background music until it covers the whole video, then fade out
        needed_ms = int(duracion_video * 1000)
        bg_music = AudioSegment.from_mp3(MUSIC_BG)
        repeticiones = needed_ms // len(bg_music) + 1
        bg_music = bg_music * repeticiones
        bg_music = bg_music[:needed_ms].fade_out(1000)

        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_bg:
            bg_music.export(tmp_bg.name, format="mp3")
        bg_audio = AudioFileClip(tmp_bg.name).volumex(0.15)
        temp_files.append(tmp_bg.name)

        # Mix: quiet background music + (optional) original audio + TTS narration
        audios = [bg_audio.set_duration(duracion_video)]
        if audio_original:
            audios.append(audio_original.volumex(0.7))
        audios.append(tts_audio_final.volumex(0.85).set_start(0))

        audio_final = CompositeAudioClip(audios).set_duration(duracion_video)
        return audio_final
    except Exception as e:
        logging.error(f"Fallo en audio: {str(e)}")
        raise
    finally:
        for file in temp_files:
            try:
                os.remove(file)
            except Exception as e:
                logging.warning(f"Error limpiando {file}: {e}")

def aplicar_glitch(video_clip):
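    """Return a copy of the clip with a simple row-shift "glitch" distortion applied to every frame."""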
    def glitch_effect(frame):
        frame = frame.copy()  # Work on a copy; MoviePy frames must not be modified in place
        height, width, _ = frame.shape
        offset = np.random.randint(5, 15)
        if height > 0:
            # Shift the rows below `offset` upwards to create the distortion
            frame[offset:, :] = np.roll(frame[:-offset, :], -offset, axis=0)
        return frame
    return video_clip.fl_image(glitch_effect)

async def procesar_video(video_input, texto_tts, voz_seleccionada):
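    """Process the uploaded video end to end.

    Cuts the video into segments joined by glitch transitions, replaces the
    audio with the generated TTS/music mix, adds the intro and outro clips and
    renders the result to a temporary MP4 (auto-deleted after 30 minutes).
    """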
    try:
        # Load the uploaded video; intro and outro are loaded later, right before
        # the final concatenation, so no reader is closed while still in use
        video_original = VideoFileClip(video_input, target_resolution=(1080, 1920))
        audio_original = video_original.audio
        duracion_video = video_original.duration

        audio_final = await procesar_audio(
            texto_tts,
            voz_seleccionada,
            duracion_video,
            audio_original
        )
        # Dynamic cut configuration
        segment_duration = 18  # Visible length of each segment
        overlap = 2            # Extra seconds appended to each cut so consecutive segments overlap
        total_segments = int(duracion_video // segment_duration) + 1

        segments = []
        glitch_clips = []
        glitch_sound = AudioFileClip(GLITCH_SOUND).volumex(0.5)

        start_time = 0
        for i in range(total_segments):
            if start_time >= duracion_video:
                break  # Avoid creating an empty final segment
            end_time = min(start_time + segment_duration + overlap, duracion_video)

            # Extract the full segment
            full_segment = video_original.subclip(start_time, end_time)

            if i > 0 and full_segment.duration > 0.5:
                # Use the first 0.5 s of the segment for the glitch transition
                glitch_part = full_segment.subclip(0, 0.5)
                glitch_part = aplicar_glitch(glitch_part)
                # Join the glitched part with the rest of the segment
                processed_segment = concatenate_videoclips([
                    glitch_part,
                    full_segment.subclip(0.5)
                ], method="compose")
                # Schedule the glitch sound effect at the start of this segment
                glitch_sound_clip = glitch_sound.set_start(start_time)
                glitch_clips.append(glitch_sound_clip)
            else:
                processed_segment = full_segment

            segments.append(processed_segment)
            start_time += segment_duration  # Advance without the 2-second overlap
        # Concatenate all segments
        video_final = concatenate_videoclips(segments, method="compose")

        # Mix the scheduled glitch sound effects into the final audio track
        if glitch_clips:
            audio_final = CompositeAudioClip([audio_final] + glitch_clips).set_duration(audio_final.duration)
        video_con_audio = video_final.set_audio(audio_final)

        # Add intro and outro
        intro = VideoFileClip(INTRO_VIDEO, target_resolution=(1080, 1920))
        outro = VideoFileClip(OUTRO_VIDEO, target_resolution=(1080, 1920))
        video_final = concatenate_videoclips([intro, video_con_audio, outro], method="compose")
        # Final render to a temporary file
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
            video_final.write_videofile(
                tmp.name,
                codec="libx264",
                audio_codec="aac",
                fps=video_original.fps,
                threads=4,
                bitrate="5M",
                ffmpeg_params=[
                    "-preset", "ultrafast",
                    "-crf", "23",
                    "-movflags", "+faststart",
                    "-vf", "scale=1920:1080"
                ],
                verbose=False
            )
            eliminar_archivo_tiempo(tmp.name, 1800)
            return tmp.name
    except Exception as e:
        logging.error(f"Fallo general: {str(e)}")
        raise
    finally:
        # Release video resources even if processing failed
        try:
            intro.close()
            outro.close()
            video_original.close()
        except Exception:
            pass

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Editor de Video con IA")
    with gr.Tab("Principal"):
        video_input = gr.Video(label="Subir video")
        texto_tts = gr.Textbox(
            label="Texto para TTS",
            lines=3,
            placeholder="Escribe aquí tu texto..."
        )
        voz_seleccionada = gr.Dropdown(
            label="Voz",
            choices=["es-ES-AlvaroNeural", "es-MX-BeatrizNeural"],
            value="es-ES-AlvaroNeural"
        )
        procesar_btn = gr.Button("Generar Video")
        video_output = gr.Video(label="Video Procesado")
        with gr.Accordion("Ejemplos de Uso", open=False):
            gr.Examples(
                examples=[[EJEMPLO_VIDEO, "¡Hola! Esto es una prueba. Suscríbete al canal."]],
                inputs=[video_input, texto_tts],
                label="Ejemplos"
            )
    procesar_btn.click(
        procesar_video,
        inputs=[video_input, texto_tts, voz_seleccionada],
        outputs=video_output
    )

if __name__ == "__main__":
    demo.queue().launch()