import gradio as gr
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import torch
from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
from moviepy.editor import AudioFileClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
from transformers import AutoProcessor, MusicgenForConditionalGeneration
import scipy.io.wavfile
import re
import numpy as np
import os
import tempfile
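
# API key for OpenRouter. The original script referenced an undefined global here;
# reading it from an environment variable is an assumption — the variable name
# OPENROUTER_API_KEY is illustrative, adjust it to your own deployment.
openai_apikey = os.environ.get("OPENROUTER_API_KEY")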
# Image-to-text: caption the uploaded image with BLIP
def img2text(image):
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
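    # NOTE: the BLIP processor and model are reloaded on every call; for a
    # long-running Space you would typically load them once at module level.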
inputs = processor(image, return_tensors="pt")
out = model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)
print(caption)
return caption
# Text-to-text: expand the caption into numbered scene descriptions
def text2text(user_input):
api_key = openai_apikey
base_url = "https://openrouter.ai/api/v1"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
data = {
"model": "openai/gpt-3.5-turbo",
"messages": [
{
"role": "system",
"content": (
"You are an expert who is very good at writing stories. Please expand it into a continuous story based on the input, and logically cut the story into sentences. Each sentence is a scene (as many sentences and scenes as possible, and at least 10 sentences). Each sentence is required The content of the sentence description should be detailed and do not use rhetorical techniques, and no ambiguous words such as pronouns should appear in the sentence. Be as detailed as possible to accurately describe who is doing what, and the scene descriptions before and after should have a certain correlation. In addition, I require your answer to follow a certain format. Let me give you an example. For example, I enter: a dolphin jumping out of the water at sunset. "
"Your answer format: "
"""
[1] The sun nears the horizon, illuminating the calm sea surface with a warm glow.
[2] A dolphin swims swiftly below the calm sea surface, moving closer to the top.
[3] The dolphin uses its powerful tail fin to prepare for a leap out of the water.
[4] The dolphin's body starts to emerge from the water, exposing itself above the surface.
[5] The dolphin leaps completely out of the water, creating an arch with its body in the air.
[6] The dolphin rotates its body in the air before it begins its descent back to the water.
[7] The dolphin's head and back make the first contact with the water, creating a splash.
[8] The dolphin fully submerges under the water, causing the splashes around it to slowly disperse.
[9] The dolphin moves forward underwater, gradually disappearing into the dimming light of the sunset.
"""
"My input is as follows, please answer me in the format without adding any other words."
)
},
{ "role": "user", "content": user_input }
]
}
response = requests.post(f"{base_url}/chat/completions", headers=headers, json=data)
response.raise_for_status()
completion = response.json()
print(completion['choices'][0]['message']['content'])
return completion['choices'][0]['message']['content']
def text2vid(input_text):
    # Split the numbered output into individual scene sentences with a regex
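    # e.g. "[1] The sun nears the horizon." is captured as "The sun nears the horizon."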
sentences = re.findall(r'\[\d+\] (.+?)(?:\n|\Z)', input_text)
    # Load the AnimateLCM motion adapter and the AnimateDiff pipeline
adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM", config_file="wangfuyun/AnimateLCM/AnimateLCM/config.json", torch_dtype=torch.float16)
pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
    # Load the LCM LoRA weights
pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora")
    # Activate the LoRA adapter and enable memory-saving features
try:
pipe.set_adapters(["lcm-lora"], [0.8])
except ValueError as e:
print("Ignoring the error:", str(e))
pipe.enable_vae_slicing()
pipe.enable_model_cpu_offload()
    all_frames = []  # collected frames from every scene, in order
    # Generate a short clip for each scene sentence and collect its frames
for index, sentence in enumerate(sentences):
output = pipe(
#prompt=sentence + ", 4k, high resolution",
prompt=sentence + ", cartoon",
negative_prompt="bad quality, worse quality, low resolution",
num_frames=24,
guidance_scale=2.0,
num_inference_steps=6,
generator=torch.Generator("cpu").manual_seed(0)
)
frames = output.frames[0]
        all_frames.extend(frames)  # append this scene's frames
return all_frames
# Text-to-text: derive a music-style prompt for the story
def text2text_A(user_input):
    # Set the API key and base URL
api_key = openai_apikey
base_url = "https://openrouter.ai/api/v1"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json"
}
data = {
"model": "openai/gpt-3.5-turbo",
"messages": [
{
"role": "system",
"content": (
"You are an expert in music criticism, please match this story with a suitable musical style based on my input and describe it, please make sure you follow my format output and do not add any other statements e.g. Input: in a small tavern everyone danced, the bartender poured drinks for everyone, everyone had a good time and was very happy and sang and danced. Output: 80s pop track with bassy drums and synth."
"Again, please make sure you follow the format of the output, here is my input:"
)
},
{ "role": "user", "content": user_input }
]
}
response = requests.post(f"{base_url}/chat/completions", headers=headers, json=data)
    response.raise_for_status()  # raise if the request failed
completion = response.json()
print(completion['choices'][0]['message']['content'])
return completion['choices'][0]['message']['content']
# Text-to-audio: generate background music with MusicGen
def text2audio(text_input, duration_seconds):
processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
inputs = processor(text=[text_input], padding=True, return_tensors="pt")
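    # MusicGen emits roughly 50 audio tokens per second of audio, so 256 tokens
    # is about 5 seconds; scale the token budget to the requested duration.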
max_new_tokens = int((duration_seconds / 5) * 256)
audio_values = model.generate(**inputs, max_new_tokens=max_new_tokens)
print(duration_seconds)
return audio_values[0, 0].numpy(), model.config.audio_encoder.sampling_rate
# Mux the audio track onto the video clip and write the result to a temp file
def result_generate(video_clip, audio_clip):
    video = video_clip.set_audio(audio_clip)
    # write_videofile needs a real path; it cannot write into an in-memory buffer
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmpfile:
        out_path = tmpfile.name
    video.write_videofile(out_path, codec="libx264", audio_codec="aac")
    return out_path
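# NOTE: result_generate is not called by the Gradio flow below; generate_video
# performs the audio/video muxing itself.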
def generate_video(image):
    # Caption the image
text = img2text(image)
    # Expand the caption into detailed scene descriptions
sentences = text2text(text)
    # Generate video frames for each scene
video_frames = text2vid(sentences)
    # Convert PIL frames to numpy arrays
video_frames = [np.array(frame) for frame in video_frames]
    # Build the (still silent) video clip
video_clip = ImageSequenceClip(video_frames, fps=24)
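    # With 24 frames per scene at 24 fps, each scene lasts about one second.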
video_duration = video_clip.duration
    # Generate matching background music
audio_text = text2text_A(text)
audio_data, audio_rate = text2audio(audio_text, video_duration)
    # Write the audio to a temporary WAV file
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmpfile:
scipy.io.wavfile.write(tmpfile, audio_rate, audio_data)
tmpfile_path = tmpfile.name
    # Wrap the WAV file in an AudioFileClip
audio_clip = AudioFileClip(tmpfile_path)
    # Attach the audio track to the video
video_clip = video_clip.set_audio(audio_clip)
print("audio_done")
    # Write the final video to a temporary file
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmpfile:
video_clip.write_videofile(tmpfile.name, codec="libx264", audio_codec="aac")
video_file_path = tmpfile.name
    # Clean up the temporary audio file and return the video path;
    # gr.Video expects a file path, so the video temp file is left for Gradio to serve
    audio_clip.close()
    os.remove(tmpfile_path)
    print("video_done")
    return video_file_path
# Define the Gradio interface
interface = gr.Interface(
    fn=generate_video,
inputs=gr.Image(type="pil"),
outputs=gr.Video(),
title="InspiroV Video Generation",
description="Upload an image to generate a video",
theme="soft"
)
# Launch the Gradio app
interface.launch()