waloneai committed · Commit 00eb3fd · verified · 1 Parent(s): ecc59a5

Update app.py

Files changed (1): app.py (+131 −97)
app.py CHANGED
@@ -10,6 +10,9 @@ import tempfile
 import shlex
 import shutil
 
+# Predefined password
+PASSWORD = "your_password_here"  # Change this to your desired password
+
 # Supported models configuration
 MODELS = {
     "deepseek-ai/DeepSeek-V3": {
@@ -277,116 +280,147 @@ def update(
         raise gr.Error(e)
 
 
+# Password protection logic
+def check_password(password):
+    return password == PASSWORD
+
+
+# Gradio app with password protection
 with gr.Blocks() as demo:
-    gr.Markdown(
-        """
+    # Login interface
+    with gr.Row():
+        password_input = gr.Textbox(label="Enter Password", type="password")
+        login_button = gr.LoginButton(label="Login")
+        logout_button = gr.LogoutButton(label="Logout")
+
+    # Main app interface (hidden by default)
+    with gr.Row(visible=False) as main_interface:
+        gr.Markdown(
+            """
     # 🏞 AI Video Composer (AI နဲ့ Video ပေါင်းစပ်ဖန်တီးမှု နည်းပညာ )
     AI အသုံးပြုပြီး ကိုယ့်မှာရှိသည့် ပုံ အသံဖိုင် video ဖိုင် စသည့် media fil များကို ပေါင်းစပ်ပြီး video ထုတ်ခိုင်းနိုင်ပါတယ် and let [Qwen2.5-Code] or [DeepSeek-V3 for Myanmar prompt] generate a new video for you (using FFMPEG).
     """,
-        elem_id="header",
-    )
-    with gr.Row():
-        with gr.Column():
-            user_files = gr.File(
-                file_count="multiple",
-                label="Media files",
-                file_types=allowed_medias,
-            )
-            user_prompt = gr.Textbox(
-                placeholder="eg: Remove the 3 first seconds of the video",
-                label="Instructions",
-            )
-            btn = gr.Button("Run")
-            with gr.Accordion("Parameters", open=False):
-                model_choice = gr.Radio(
-                    choices=list(MODELS.keys()),
-                    value=list(MODELS.keys())[0],
-                    label="Model",
+            elem_id="header",
+        )
+        with gr.Row():
+            with gr.Column():
+                user_files = gr.File(
+                    file_count="multiple",
+                    label="Media files",
+                    file_types=allowed_medias,
                 )
-                top_p = gr.Slider(
-                    minimum=-0,
-                    maximum=1.0,
-                    value=0.7,
-                    step=0.05,
-                    interactive=True,
-                    label="Top-p (nucleus sampling)",
+                user_prompt = gr.Textbox(
+                    placeholder="eg: Remove the 3 first seconds of the video",
+                    label="Instructions",
                 )
-                temperature = gr.Slider(
-                    minimum=-0,
-                    maximum=5.0,
-                    value=0.1,
-                    step=0.1,
-                    interactive=True,
-                    label="Temperature",
+                btn = gr.Button("Run")
+                with gr.Accordion("Parameters", open=False):
+                    model_choice = gr.Radio(
+                        choices=list(MODELS.keys()),
+                        value=list(MODELS.keys())[0],
+                        label="Model",
+                    )
+                    top_p = gr.Slider(
+                        minimum=-0,
+                        maximum=1.0,
+                        value=0.7,
+                        step=0.05,
+                        interactive=True,
+                        label="Top-p (nucleus sampling)",
+                    )
+                    temperature = gr.Slider(
+                        minimum=-0,
+                        maximum=5.0,
+                        value=0.1,
+                        step=0.1,
+                        interactive=True,
+                        label="Temperature",
+                    )
+            with gr.Column():
+                generated_video = gr.Video(
+                    interactive=False, label="Generated Video", include_audio=True
                 )
-        with gr.Column():
-            generated_video = gr.Video(
-                interactive=False, label="Generated Video", include_audio=True
-            )
-            generated_command = gr.Markdown()
+                generated_command = gr.Markdown()
 
-    btn.click(
-        fn=update,
-        inputs=[user_files, user_prompt, top_p, temperature, model_choice],
-        outputs=[generated_video, generated_command],
-    )
-    with gr.Row():
-        gr.Examples(
-            examples=[
-                [
-                    ["./examples/ai_talk.wav", "./examples/bg-image.png"],
-                    "Use the image as the background with a waveform visualization for the audio positioned in center of the video.",
-                    0.7,
-                    0.1,
-                    (
-                        list(MODELS.keys())[1]
-                        if len(MODELS) > 1
-                        else list(MODELS.keys())[0]
-                    ),
-                ],
-                [
-                    ["./examples/ai_talk.wav", "./examples/bg-image.png"],
-                    "Use the image as the background with a waveform visualization for the audio positioned in center of the video. Make sure the waveform has a max height of 250 pixels.",
-                    0.7,
-                    0.1,
-                    list(MODELS.keys())[0],
-                ],
-                [
+        btn.click(
+            fn=update,
+            inputs=[user_files, user_prompt, top_p, temperature, model_choice],
+            outputs=[generated_video, generated_command],
+        )
+        with gr.Row():
+            gr.Examples(
+                examples=[
+                    [
+                        ["./examples/ai_talk.wav", "./examples/bg-image.png"],
+                        "Use the image as the background with a waveform visualization for the audio positioned in center of the video.",
+                        0.7,
+                        0.1,
+                        (
+                            list(MODELS.keys())[1]
+                            if len(MODELS) > 1
+                            else list(MODELS.keys())[0]
+                        ),
+                    ],
+                    [
+                        ["./examples/ai_talk.wav", "./examples/bg-image.png"],
+                        "Use the image as the background with a waveform visualization for the audio positioned in center of the video. Make sure the waveform has a max height of 250 pixels.",
+                        0.7,
+                        0.1,
+                        list(MODELS.keys())[0],
+                    ],
                     [
-                        "./examples/cat1.jpeg",
-                        "./examples/cat2.jpeg",
-                        "./examples/cat3.jpeg",
-                        "./examples/cat4.jpeg",
-                        "./examples/cat5.jpeg",
-                        "./examples/cat6.jpeg",
-                        "./examples/heat-wave.mp3",
+                        [
+                            "./examples/cat1.jpeg",
+                            "./examples/cat2.jpeg",
+                            "./examples/cat3.jpeg",
+                            "./examples/cat4.jpeg",
+                            "./examples/cat5.jpeg",
+                            "./examples/cat6.jpeg",
+                            "./examples/heat-wave.mp3",
+                        ],
+                        "နောက်ခံတေးဂီတအဖြစ် အသံဖြင့် cat3 ရုပ်ပုံနှင့် cat4, cat5 ရုပ်ပုံများကို သုံးပါ။ ဗီဒီယိုကြာချိန်ကို အသံကြာချိန်နှင့် ကိုက်ညီအောင်လုပ်ပါ။ cat3 ပုံဖြင့် စပါ။ 10 စက္ကန့်ကြာပြီးနောက် cat4 ပုံကို ပြောင်းပါ။ 20 စက္ကန့်ကြာပြီးနောက် cat5 ပုံကို ပြောင်းပါ။",
+                        0.7,
+                        0.1,
+                        (
+                            list(MODELS.keys())[0]
+                            if len(MODELS) > 1
+                            else list(MODELS.keys())[0]
+                        ),
                     ],
-                    "နောက်ခံတေးဂီတအဖြစ် အသံဖြင့် cat3 ရုပ်ပုံနှင့် cat4, cat5 ရုပ်ပုံများကို သုံးပါ။ ဗီဒီယိုကြာချိန်ကို အသံကြာချိန်နှင့် ကိုက်ညီအောင်လုပ်ပါ။ cat3 ပုံဖြင့် စပါ။ 10 စက္ကန့်ကြာပြီးနောက် cat4 ပုံကို ပြောင်းပါ။ 20 စက္ကန့်ကြာပြီးနောက် cat5 ပုံကို ပြောင်းပါ။",
-                    0.7,
-                    0.1,
-                    (
-                        list(MODELS.keys())[0]
-                        if len(MODELS) > 1
-                        else list(MODELS.keys())[0]
-                    ),
                 ],
-            ],
-            inputs=[user_files, user_prompt, top_p, temperature, model_choice],
-            outputs=[generated_video, generated_command],
-            fn=update,
-            run_on_click=True,
-            cache_examples=False,
-        )
+                inputs=[user_files, user_prompt, top_p, temperature, model_choice],
+                outputs=[generated_video, generated_command],
+                fn=update,
+                run_on_click=True,
+                cache_examples=False,
+            )
 
-    with gr.Row():
-        gr.Markdown(
-            """
-            If you have idea to improve this please open a PR:
+        with gr.Row():
+            gr.Markdown(
+                """
+                If you have idea to improve this please open a PR:
 
-            [![Open a Pull Request](https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-pr-lg-light.svg)](https://huggingface.co/spaces/huggingface-projects/video-composer-gpt4/discussions)
-            """,
-        )
+                [![Open a Pull Request](https://huggingface.co/datasets/huggingface/badges/raw/main/open-a-pr-lg-light.svg)](https://huggingface.co/spaces/huggingface-projects/video-composer-gpt4/discussions)
+                """,
+            )
+
+    # Show/hide main interface based on login status
+    def toggle_interface(is_logged_in):
+        return gr.update(visible=is_logged_in)
+
+    login_button.click(
+        fn=check_password,
+        inputs=password_input,
+        outputs=None,
+        success=toggle_interface,
+        queue=False,
+    )
+    logout_button.click(
+        fn=lambda: False,
+        outputs=main_interface,
+        queue=False,
+    )
 
 # Launch the app with embedding enabled
 demo.queue(default_concurrency_limit=200)
-demo.launch(show_api=False, ssr_mode=True, share=True, inline=True)
+demo.launch(show_api=False, ssr_mode=False, share=True, inline=True)
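A note on the login wiring in this hunk: `gr.LoginButton` is Gradio's built-in Hugging Face OAuth sign-in button rather than a generic trigger, `.click()` accepts no `success=` keyword, the boolean returned by `check_password` is discarded because `outputs=None`, and returning a bare `False` to a `gr.Row` does not hide it. A minimal sketch of the gate this commit appears to be aiming for, using plain buttons and visibility updates (it reuses `password_input`, `main_interface`, and `PASSWORD` from the diff above; this is an assumption, not the committed code):

```python
# Plain buttons instead of the OAuth-specific LoginButton/LogoutButton.
login_button = gr.Button("Login")
logout_button = gr.Button("Logout")

def try_login(password):
    # Reveal the main interface only when the password matches.
    return gr.update(visible=(password == PASSWORD))

login_button.click(fn=try_login, inputs=password_input, outputs=main_interface, queue=False)
logout_button.click(fn=lambda: gr.update(visible=False), outputs=main_interface, queue=False)
```

An unrelated nit in the same hunk: `minimum=-0` on both sliders reads like a typo for `0.0`, though `-0` evaluates to plain `0`, so behavior is unchanged.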
 
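On the launch line: `share=True` only creates a temporary public gradio.live link when the app runs outside Spaces (on Spaces it is unsupported and ignored with a warning), and `inline=True` only matters inside notebook environments. Assuming this app runs as a Space, a plain launch is likely sufficient:

```python
# Sketch: share and inline flags dropped, since neither has an effect on Spaces.
demo.launch(show_api=False, ssr_mode=False)
```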