prithivMLmods committed on
Commit
babe1cb
·
verified ·
1 Parent(s): 1c04d6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -349,9 +349,9 @@ def generate(
349
  return
350
 
351
  # NEW: GEMMA3-4B VIDEO Branch
352
- if lower_text.startswith("@gemma3-4b-video"):
353
  # Remove the video flag from the prompt.
354
- prompt_clean = re.sub(r"@gemma3-4b-video", "", text, flags=re.IGNORECASE).strip().strip('"')
355
  if files:
356
  # Assume the first file is a video.
357
  video_path = files[0]
@@ -483,10 +483,10 @@ demo = gr.ChatInterface(
483
  examples=[
484
 
485
  [{"text": "@gemma3-4b Explain the Image", "files": ["examples/3.jpg"]}],
486
- [{"text": "@gemma3-4b-video Describe the video", "files": ["examples/Missing.mp4"]}],
487
- [{"text": "@gemma3-4b-video Explain what is happening in this video ?", "files": ["examples/oreo.mp4"]}],
488
- [{"text": "@gemma3-4b-video Summarize the events in this video", "files": ["examples/sky.mp4"]}],
489
- [{"text": "@gemma3-4b-video What is in the video ?", "files": ["examples/redlight.mp4"]}],
490
  [{"text": "@gemma3-4b Where do the major drought happen?", "files": ["examples/111.png"]}],
491
  [{"text": "@gemma3-4b Transcription of the letter", "files": ["examples/222.png"]}],
492
  ['@lightningv5 Chocolate dripping from a donut'],
@@ -498,9 +498,9 @@ demo = gr.ChatInterface(
498
  ],
499
  cache_examples=False,
500
  type="messages",
501
- description="# **Imagineo Chat `@gemma3-4b 'prompt..', @gemma3-4b-video, @lightningv5, etc..`**",
502
  fill_height=True,
503
- textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", "video"], file_count="multiple", placeholder="use the tags @gemma3-4b for multimodal, @gemma3-4b-video for video, @lightningv5, @lightningv4, @turbov3 for image gen !"),
504
  stop_btn="Stop Generation",
505
  multimodal=True,
506
  )
 
349
  return
350
 
351
  # NEW: GEMMA3-4B VIDEO Branch
352
+ if lower_text.startswith("@video-infer"):
353
  # Remove the video flag from the prompt.
354
+ prompt_clean = re.sub(r"@video-infer", "", text, flags=re.IGNORECASE).strip().strip('"')
355
  if files:
356
  # Assume the first file is a video.
357
  video_path = files[0]
 
483
  examples=[
484
 
485
  [{"text": "@gemma3-4b Explain the Image", "files": ["examples/3.jpg"]}],
486
+ [{"text": "@video-infer Describe the video", "files": ["examples/Missing.mp4"]}],
487
+ [{"text": "@video-infer Explain what is happening in this video ?", "files": ["examples/oreo.mp4"]}],
488
+ [{"text": "@video-infer Summarize the events in this video", "files": ["examples/sky.mp4"]}],
489
+ [{"text": "@video-infer What is in the video ?", "files": ["examples/redlight.mp4"]}],
490
  [{"text": "@gemma3-4b Where do the major drought happen?", "files": ["examples/111.png"]}],
491
  [{"text": "@gemma3-4b Transcription of the letter", "files": ["examples/222.png"]}],
492
  ['@lightningv5 Chocolate dripping from a donut'],
 
498
  ],
499
  cache_examples=False,
500
  type="messages",
501
+ description="# **Imagineo Chat `@gemma3-4b 'prompt..', @video-infer for video understanding!`**",
502
  fill_height=True,
503
+ textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", "video"], file_count="multiple", placeholder="use the tags @gemma3-4b for multimodal, @video-infer for video, @lightningv5, @lightningv4, @turbov3 for image gen !"),
504
  stop_btn="Stop Generation",
505
  multimodal=True,
506
  )