Update app.py
app.py
CHANGED
@@ -25,6 +25,8 @@ for file in [INTRO_VIDEO, OUTRO_VIDEO, MUSIC_BG, EJEMPLO_VIDEO]:
 # Configuración de chunks
 CHUNK_SIZE = 60  # 1 minuto por chunk
 MAX_CHUNKS = 50
+SEGMENT_DURATION = 18  # Duración de cada segmento
+OVERLAP = 2  # Segundos a eliminar entre segmentos
 
 def eliminar_archivo_tiempo(ruta, delay=1800):
     def eliminar():
@@ -86,98 +88,20 @@ async def procesar_video(video_input, texto_tts, voz_seleccionada):
         audios.append(tts_audio.set_start(0).volumex(0.85))
         audio_final = CompositeAudioClip(audios).set_duration(duracion_video)
 
-        # Dividir video en
-
-
-
-
-
-
-        # Procesar cada chunk
-        processed_clips = []
-        for chunk in chunks:
-            processed_clips.append(chunk)
-
-        # Combinar chunks (sin efectos)
-        video_final = concatenate_videoclips(processed_clips, method="compose")
-        video_final = video_final.set_audio(audio_final)
-
-        # Agregar intro y outro
-        intro = VideoFileClip(INTRO_VIDEO, target_resolution=(720, 1280))
-        outro = VideoFileClip(OUTRO_VIDEO, target_resolution=(720, 1280))
-        video_final = concatenate_videoclips([intro, video_final, outro], method="compose")
-
-        # Renderizado final
-        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
-            video_final.write_videofile(
-                tmp.name,
-                codec="libx264",
-                audio_codec="aac",
-                fps=24,
-                threads=2,
-                bitrate="3M",
-                ffmpeg_params=[
-                    "-preset", "ultrafast",
-                    "-crf", "28",
-                    "-movflags", "+faststart",
-                    "-vf", "scale=1280:720"
-                ],
-                verbose=False
-            )
-            eliminar_archivo_tiempo(tmp.name, 1800)
-            logging.info(f"Video final guardado: {tmp.name}")
-            return tmp.name
+        # Dividir video en segmentos con cortes de 2 segundos
+        segments = []
+        current_time = 0
+        while current_time < duracion_video:
+            end_time = current_time + SEGMENT_DURATION
+            if end_time > duracion_video:
+                break  # Terminar si ya no hay suficiente tiempo
 
-
-
-
-
-
-
-
-
-
-            if outro:
-                outro.close()
-            for file in temp_files:
-                try:
-                    os.remove(file)
-                except Exception as e:
-                    logging.warning(f"Error limpiando {file}: {e}")
-        except Exception as e:
-            logging.warning(f"Error al cerrar recursos: {str(e)}")
-
-# Interfaz Gradio
-with gr.Blocks() as demo:
-    gr.Markdown("# Editor de Video con IA")
-
-    with gr.Tab("Principal"):
-        video_input = gr.Video(label="Subir video")
-        texto_tts = gr.Textbox(
-            label="Texto para TTS",
-            lines=3,
-            placeholder="Escribe aquí tu texto..."
-        )
-        voz_seleccionada = gr.Dropdown(
-            label="Voz",
-            choices=["es-ES-AlvaroNeural", "es-MX-BeatrizNeural"],
-            value="es-ES-AlvaroNeural"
-        )
-        procesar_btn = gr.Button("Generar Video")
-        video_output = gr.Video(label="Video Procesado")
-
-        with gr.Accordion("Ejemplos de Uso", open=False):
-            gr.Examples(
-                examples=[[EJEMPLO_VIDEO, "¡Hola! Esto es una prueba. Suscríbete al canal."]],
-                inputs=[video_input, texto_tts],
-                label="Ejemplos"
-            )
-
-        procesar_btn.click(
-            procesar_video,
-            inputs=[video_input, texto_tts, voz_seleccionada],
-            outputs=video_output
-        )
-
-if __name__ == "__main__":
-    demo.queue().launch()
+            # Extraer segmento eliminando 2 segundos al final
+            full_segment = video_original.subclip(current_time, end_time)
+            if segments and full_segment.duration >= OVERLAP:
+                full_segment = full_segment.subclip(0, full_segment.duration - OVERLAP)
+
+            segments.append(full_segment)
+            current_time += (SEGMENT_DURATION - OVERLAP)
+
+        # Aseg
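The new SEGMENT_DURATION/OVERLAP loop advances by SEGMENT_DURATION - OVERLAP (16 s) per iteration and trims the last OVERLAP seconds from every segment except the first. The following standalone sketch reproduces only that boundary arithmetic so the resulting cut points can be inspected without loading a video; plan_segments is a hypothetical helper, not part of app.py.

# Standalone sketch of the cut points produced by the new while-loop in app.py.
# plan_segments is a hypothetical helper used here for illustration only.
SEGMENT_DURATION = 18  # seconds per raw segment, as added in the commit
OVERLAP = 2            # seconds trimmed from every segment after the first

def plan_segments(duracion_video: float):
    """Return the (start, end) ranges the committed loop would keep."""
    ranges = []
    current_time = 0
    while current_time < duracion_video:
        end_time = current_time + SEGMENT_DURATION
        if end_time > duracion_video:
            break  # the trailing remainder is dropped, as in the commit
        # Segments after the first lose their final OVERLAP seconds.
        kept_end = end_time if not ranges else end_time - OVERLAP
        ranges.append((current_time, kept_end))
        current_time += SEGMENT_DURATION - OVERLAP
    return ranges

if __name__ == "__main__":
    print(plan_segments(60))  # [(0, 18), (16, 32), (32, 48)]

With the committed values, each kept range after the first spans 16 s and starts where the previous raw segment was trimmed, so only the first two segments share 2 s of material.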
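For completeness, a hedged usage sketch of turning those ranges into MoviePy subclips and re-joining them with the same concatenate_videoclips(..., method="compose") call the file already uses. It assumes MoviePy 1.x (the API app.py imports), reuses plan_segments from the sketch above, and uses placeholder file names; it is illustrative only, since the commit's own reassembly code is truncated in the diff.

# Hedged usage sketch (MoviePy 1.x assumed); "entrada.mp4"/"salida.mp4" are placeholders.
from moviepy.editor import VideoFileClip, concatenate_videoclips

video_original = VideoFileClip("entrada.mp4")
# Cut the planned ranges out of the source clip.
segments = [video_original.subclip(inicio, fin)
            for inicio, fin in plan_segments(video_original.duration)]
# Re-join the segments; "compose" matches the method used elsewhere in app.py.
video_final = concatenate_videoclips(segments, method="compose")
video_final.write_videofile("salida.mp4", codec="libx264", audio_codec="aac", fps=24)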