"""Video description demo: caption sampled video frames with moondream2, describe
the audio track with SALMONN, then have Zephyr merge both into a short summary."""

import re

import spaces
import gradio as gr
from gradio_client import Client
import cv2
import torch
from moviepy.editor import VideoFileClip
from transformers import pipeline

# Zephyr is the text-only LLM that fuses the visual and audio captions.
zephyr_model = "HuggingFaceH4/zephyr-7b-beta"
pipe = pipeline("text-generation", model=zephyr_model, torch_dtype=torch.bfloat16, device_map="auto")

standard_sys = """
You will be provided with a list of visual details observed at regular intervals, along with an audio description. These pieces of information originate from a single video. The visual details are extracted from the video at fixed time intervals and represent consecutive frames. Typically, the video consists of a brief sequence showing one or more subjects...

Please note that the following list of image descriptions (visual details) was obtained by extracting individual frames from a continuous video featuring one or more subjects. Depending on the case, all depicted individuals may correspond to the same person(s), with minor variations due to changes in lighting, angle, and facial expressions over time. Regardless, assume temporal continuity among the frames unless otherwise specified.

Audio events are actual recordings from the video, representing sounds and spoken words independent of the visuals. While audio events offer rich context and background information, elucidating the environment and ambient noises, the visual representation tends to focus mainly on the primary subjects. Despite the high likelihood of alignment, there might be rare occasions where the audio information doesn't precisely match the visuals. In such circumstances, prioritize visual evidence and cautiously incorporate seemingly incongruous auditory clues into your summary. Exercise vigilance when reconciling conflicts and maintain a strong commitment to fidelity in generating a comprehensive overview. Your job is to integrate these multimodal inputs intelligently and provide a very short summary of what is happening in the original video. Provide a succinct yet thorough overview of what you understood.
"""

def extract_frames(video_in, interval=24, output_format='.jpg'):
    """Extract frames from a video at a specified interval and store them in a list.

    Args:
    - video_in: string or path-like object pointing to the video file
    - interval: integer specifying how many frames apart to extract images (default: 24)
    - output_format: string indicating desired format for saved images (default: '.jpg')

    Returns:
    A list of strings containing paths to saved images.
    """
    vidcap = cv2.VideoCapture(video_in)
    frames = []
    count = 0

    while True:
        success, image = vidcap.read()

        if success:
            print('Read a new frame:', success)

            # Keep only every `interval`-th frame.
            if count % interval == 0:
                filename = f'frame_{count // interval}{output_format}'
                frames.append(filename)
                cv2.imwrite(filename, image)
                print(f'Saved {filename}')

            count += 1
        else:
            break

    vidcap.release()
    print('Done extracting frames!')

    return frames
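
# Illustrative usage (hypothetical file name): with the default interval of 24,
# a 24 fps clip yields roughly one extracted frame per second.
#   frame_paths = extract_frames("my_clip.mp4")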

# moondream2: a small vision-language model used locally to caption individual frames.
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image

model_id = "vikhyatk/moondream2"
revision = "2024-03-06"
model = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, revision=revision
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)

@spaces.GPU()
def process_image(image_in):
    """Describe a single frame in one sentence using the local moondream2 model."""
    # Alternative kept for reference: query a hosted moondream Space via gradio_client.
    '''
    client = Client("https://vikhyatk-moondream1.hf.space/")
    result = client.predict(
        image_in,  # filepath in 'image' Image component
        "Describe precisely the image in one sentence.",  # str in 'Question' Textbox component
        api_name="/answer_question"
        #api_name="/predict"
    )
    print(result)
    return result
    '''
    image = Image.open(image_in)
    enc_image = model.encode_image(image)
    result = model.answer_question(enc_image, "Describe the image in one sentence.", tokenizer)
    print(result)
    return result

def extract_audio(video_path):
    """Extract the audio track from the video and save it as an MP3 file."""
    video_clip = VideoFileClip(video_path)
    audio_clip = video_clip.audio
    audio_clip.write_audiofile("output_audio.mp3")
    video_clip.close()  # release the reader once the audio has been written out
    return "output_audio.mp3"

def get_salmonn(audio_in):
    """Ask the SALMONN-7B Space to describe the extracted audio."""
    salmonn_prompt = "Please describe the audio"
    client = Client("fffiloni/SALMONN-7B-gradio")
    result = client.predict(
        audio_in,
        salmonn_prompt,
        4,
        1,
        0.9,
        api_name="/gradio_answer"
    )
    print(result)
    return result

@spaces.GPU()
def llm_process(user_prompt):
    # Build a Zephyr-style chat prompt: system instructions, the combined captions
    # as the user turn, then the assistant tag so the model answers directly.
    instruction = f"""
<|system|>
{standard_sys}</s>
<|user|>
"""

    prompt = f"{instruction.strip()}\n{user_prompt}</s>\n<|assistant|>"
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)

    # Strip the echoed prompt (everything from <|system|> up to the <|assistant|> tag)
    # so that only the newly generated summary remains.
    pattern = r'<\|system\|>(.*?)<\|assistant\|>'
    cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)

    print(f"SUGGESTED video description: {cleaned_text}")
    return cleaned_text.lstrip("\n")
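
# For reference, the assembled prompt sent to Zephyr follows the zephyr-7b-beta chat
# format (sketch only; the actual caption text is built in infer() below):
#
#   <|system|>
#   ...system instructions...</s>
#   <|user|>
#   ### Visual events: ... ### Audio events: ...</s>
#   <|assistant|>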

def infer(video_in):
    # 1. Sample frames from the video and caption each one with moondream2.
    frame_files = extract_frames(video_in)

    processed_texts = []
    for frame_file in frame_files:
        text = process_image(frame_file)
        processed_texts.append(text)
    print(processed_texts)

    visual_events = '\n'.join(processed_texts)

    # 2. Extract the audio track and get a textual description of it from SALMONN.
    extracted_audio = extract_audio(video_in)
    print(extracted_audio)

    audio_content_described = get_salmonn(extracted_audio)

    # 3. Merge both caption streams and let Zephyr write the final description.
    formatted_captions = f"""
### Visual events:\n{visual_events}\n### Audio events:\n{audio_content_described}
    """
    print(formatted_captions)

    video_description_from_llm = llm_process(formatted_captions)

    return video_description_from_llm

with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
        <h2 style="text-align: center;">Video description</h2>
        """)
        video_in = gr.Video(label="Video input")
        submit_btn = gr.Button("Submit")
        video_description = gr.Textbox(label="Video description")
        submit_btn.click(
            fn=infer,
            inputs=[video_in],
            outputs=[video_description]
        )

demo.queue().launch()