Update app.py
app.py
CHANGED
Removed (previous version of app.py; lines the page truncated are marked with "…"):

@@ -11,100 +11,94 @@ import subprocess
 from diffusers import (
-    …
-    StableVideoDiffusionPipeline,
-    AnimateDiffPipeline,
-    MotionAdapter,
     DDIMScheduler,
     DPMSolverMultistepScheduler
 )
-from diffusers.utils import export_to_video

-class CartoonFilmGenerator:
     @spaces.GPU
     def load_models(self):
-        """Load models …"""
-        print("Loading …")
         try:
-            # 1. …
-            self.… = ….from_pretrained(
-                "…",
-                torch_dtype=torch.…,
-                …
-                requires_safety_checker=False
             ).to(self.device)
             # Enable memory optimizations
-            self.…
-            self.image_generator.enable_vae_tiling()
-            print("…")
         except Exception as e:
-            print(f"…")
-            self.…
         try:
-            # 2. …
-            print("…")
-            …
-            self.video_generator = AnimateDiffPipeline.from_pretrained(
-                "runwayml/stable-diffusion-v1-5",
-                motion_adapter=adapter,
-                torch_dtype=torch.float16,
-                safety_checker=None,
-                requires_safety_checker=False
-            ).to(self.device)
-
-            # Use DPM solver for better stability
-            self.video_generator.scheduler = DPMSolverMultistepScheduler.from_pretrained(
-                "runwayml/stable-diffusion-v1-5",
-                subfolder="scheduler"
             )
-
-            # Enable memory optimizations
-            self.video_generator.enable_vae_slicing()
-            if hasattr(self.video_generator, 'enable_vae_tiling'):
-                self.video_generator.enable_vae_tiling()
-
-            print("Video generator loaded successfully")
         except Exception as e:
-            print(f"…")
-            self.…
-
-            # 3. Skip TTS for now due to loading issues
-            print("Skipping TTS model due to loading issues")
-            self.tts_model = None
         self.models_loaded = True
-        print("…")
@@ -112,123 +106,296 @@ class CartoonFilmGenerator:
         torch.cuda.empty_cache()
         gc.collect()

-    def …
-        """…"""
-        # …
-        words = …
-        …
         scene_templates = [
-            …
         ]
         for i, template in enumerate(scene_templates):
             scenes.append({
                 "scene_number": i + 1,
-                "…",
-                "…",
                 "dialogue": [
-                    {"character": main_char, "text": f"This …"}
                 ],
-                "background": f"{setting} with {…}",
-                "mood": …,
-                "duration": "…",
             })
-        return …
-        …
-                "description": "Helpful cartoon companion with warm bright colors, friendly appearance, loyal sidekick, 2D animation style",
-                "personality": "loyal, wise, encouraging, funny"
-            }
-        ],
-        "scenes": scenes,
-        "setting": setting,
-        "theme": theme,
-        "style": "Bright and colorful 2D cartoon animation, family-friendly, expressive characters, Disney-Pixar inspired"
         }

     @spaces.GPU
-    def …
-        """Generate character images"""
         self.load_models()
         character_images = {}
-        if not self.…
-            print("…")
             return character_images
         for character in characters:
-            prompt = f"cartoon character design, {character['description']}, character sheet, multiple poses, white background, 2D animation style, high quality, colorful, Disney style"
-            negative_prompt = "realistic, 3D render, dark, scary, blurry, low quality, inappropriate"
             try:
-                print(f"…")
-                …
                     prompt=prompt,
                     negative_prompt=negative_prompt,
-                    num_inference_steps=…,
-                    guidance_scale=…,
-                    height=…,
-                    width=…
                 ).images[0]
                 char_path = f"{self.temp_dir}/character_{character['name'].replace(' ', '_')}.png"
                 image.save(char_path)
                 character_images[character['name']] = char_path
-                print(f"Generated character: {character['name']}")
                 self.clear_gpu_memory()
@@ -238,34 +405,47 @@ class CartoonFilmGenerator:
         return character_images

     @spaces.GPU
-    def …
-        """Generate background images for each scene"""
         self.load_models()
         background_images = {}
-        if not self.…
-            print("…")
             return background_images
         for scene in scenes:
-            prompt = f"cartoon background scene, {scene['background']}, {scene['mood']} atmosphere, no characters, detailed environment, bright vibrant colors, 2D animation style, Disney background art"
-            negative_prompt = "characters, people, realistic, dark, scary, low quality, blurry"
             try:
-                print(f"…")
-                …
                     prompt=prompt,
                     negative_prompt=negative_prompt,
-                    num_inference_steps=…,
-                    guidance_scale=…,
-                    height=…,
-                    width=…
                 ).images[0]
                 bg_path = f"{self.temp_dir}/background_scene_{scene['scene_number']}.png"
                 image.save(bg_path)
                 background_images[scene['scene_number']] = bg_path
-                print(f"…")
                 self.clear_gpu_memory()
@@ -274,159 +454,198 @@ class CartoonFilmGenerator:
         return background_images

     @spaces.GPU
-    def …
-        """Generate videos …"""
-        self.load_models()
         scene_videos = []
         for scene in scenes:
             try:
-                if …:
-                    # Create prompt for scene animation
-                    characters_text = ", ".join(scene['characters_present'])
-                    prompt = f"cartoon animation, {characters_text} in {scene['background']}, {scene['mood']} mood, 2D animated style, smooth gentle motion, Disney animation, family friendly"
-                    negative_prompt = "realistic, 3D, static, blurry, low quality, scary, violent"
-
-                    # Generate animated video
-                    video_frames = self.video_generator(
-                        prompt=prompt,
-                        negative_prompt=negative_prompt,
-                        num_frames=8,  # Short clips for memory efficiency
-                        guidance_scale=7.5,
-                        num_inference_steps=12,  # Reduced steps for speed
-                        height=384,
-                        width=640
-                    ).frames[0]
-
-                    # Save video
-                    video_path = f"{self.temp_dir}/scene_{scene['scene_number']}.mp4"
-                    export_to_video(video_frames, video_path, fps=4)  # Slow FPS for smooth motion
-                    scene_videos.append(video_path)
-                    print(f"Generated video for scene {scene['scene_number']}")
-                    self.clear_gpu_memory()
                 else:
-                    # Fallback …
-                    …
-                        scene['scene_number'],
-                        scene['mood']
-                    )
-                    if video_path:
-                        scene_videos.append(video_path)
-                        print(f"Created static video for scene {scene['scene_number']}")
             except Exception as e:
                 print(f"Error in scene {scene['scene_number']}: {e}")
-                # Create …
                 if scene['scene_number'] in background_images:
-                    …
-                        int(scene.get('duration', 25)),
-                        scene['scene_number']
-                    )
-                    if video_path:
-                        scene_videos.append(video_path)
-                        print(f"Created fallback video for scene {scene['scene_number']}")
-                except Exception as e2:
-                    print(f"Fallback video failed: {e2}")
         return scene_videos

-    def …
-        """…"""
-        video_path = f"{self.temp_dir}/scene_{scene_num}.mp4"
         try:
-            …
-            img_array = np.array(image.resize((640, 384)))
-            img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
-            # …
-            …
-                M = np.float32([[1, 0, shift_x], [0, 1, 0]])
-                frame = cv2.warpAffine(frame, M, (640, 384))
-                scale = 1.0
-            elif mood in ['curious']:
-                # Zoom out slightly
-                scale = 1.05 - progress * 0.03
-            else:
-                # Default gentle zoom
-                scale = 1.0 + progress * 0.02
-
-            # Apply scaling
-            if scale != 1.0:
-                h, w = frame.shape[:2]
-                center_x, center_y = w // 2, h // 2
-                M = cv2.getRotationMatrix2D((center_x, center_y), 0, scale)
-                frame = cv2.warpAffine(frame, M, (w, h))
-
-            out.write(frame)
-            …
         except Exception as e:
-            print(f"…")
             return None

-    def …
-        """Create …"""
         try:
-            …
             img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
             fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-            fps = …
-            …
             out.release()
             return video_path
         except Exception as e:
-            print(f"…")
             return None

-    def …
-        """…"""
         if not scene_videos:
             print("No videos to merge")
             return None
-        final_video_path = f"{self.temp_dir}/…"
         try:
-            print("…")
             # Create concat file
             concat_file = f"{self.temp_dir}/concat_list.txt"
@@ -435,16 +654,20 @@ class CartoonFilmGenerator:
             if os.path.exists(video):
                 f.write(f"file '{os.path.abspath(video)}'\n")

-            # …
             cmd = [
                 'ffmpeg', '-f', 'concat', '-safe', '0', '-i', concat_file,
-                '-c:v', 'libx264',
                 '-y', final_video_path
             ]
             result = subprocess.run(cmd, capture_output=True, text=True)
             if result.returncode == 0:
-                print("…")
                 return final_video_path
             else:
                 print(f"FFmpeg error: {result.stderr}")
@@ -455,41 +678,44 @@ class CartoonFilmGenerator:
             return None

     @spaces.GPU
-    def …
-        """Main function to generate …"""
         try:
-            print("Starting cartoon film generation...")
-            # Step 1: …
-            print("Creating …")
-            …
-            # Step 2: Generate characters
-            print("…")
-            character_images = self.…
-            # Step 3: Generate backgrounds
-            print("Creating …")
-            background_images = self.…
-            # Step 4: Generate …
-            print("Creating animated scenes...")
-            scene_videos = self.…
-                …
                 character_images,
                 background_images
             )
-            # Step 5: Merge …
-            print("Creating final film...")
-            final_video = self.…
             if final_video and os.path.exists(final_video):
-                print("…")
-                return final_video, …
             else:
                 print("Partial success - some components may be missing")
-                return None, …
         except Exception as e:
             print(f"Generation failed: {e}")
@@ -502,12 +728,12 @@ class CartoonFilmGenerator:
         }
         return None, error_info, f"Generation failed: {str(e)}"

-# Initialize generator
-generator = …
 @spaces.GPU
-def …
-    """Gradio interface function"""
     if not script.strip():
         empty_response = {
             "error": True,
@@ -518,111 +744,175 @@ def create_cartoon_film(script):
         }
         return None, empty_response, "Please enter a script"

-    return generator.…
-# Gradio Interface
 with gr.Blocks(
-    title="AI Cartoon Film Generator",
     theme=gr.themes.Soft(),
     css="""
     .gradio-container {
-        max-width: …
     }
     """
 ) as demo:
     gr.Markdown("""
-    …
-    - **…**
-    - **…**
-    …
     """)
     with gr.Row():
         with gr.Column(scale=1):
             script_input = gr.Textbox(
                 label="Your Story Script",
-                placeholder="Enter your story idea! …",
-                …
             )
             generate_btn = gr.Button(
-                "Generate Cartoon Film",
                 variant="primary",
                 size="lg"
             )
             gr.Markdown("""
-            **Processing Time:** …
-            **Output:** …
-            **…**
             """)
         with gr.Column(scale=1):
             video_output = gr.Video(
-                label="…",
-                height=…
             )
             status_output = gr.Textbox(
                 label="Generation Status",
-                lines=…
             )
             script_details = gr.JSON(
-                label="…",
                 visible=True
             )
     # Event handlers
     generate_btn.click(
-        fn=…,
         inputs=[script_input],
         outputs=[video_output, script_details, status_output],
         show_progress=True
     )
-    # …
     gr.Examples(
         examples=[
-            ["A brave young explorer discovers a magical forest where talking animals help find …"],
-            ["Two best friends …"],
-            ["A small robot …"],
-            ["A young artist discovers that …"],
-            ["A curious cat and clever mouse team up …"],
-            ["A kind-hearted dragon who just wants to make friends learns to overcome …"]
         ],
         inputs=[script_input],
-        label="Try these example stories:"
     )
     gr.Markdown("""
     ---
-    …
-    - **…**
-    - **…**
-    - **…**
-    …
     """)
Updated file (app.py, as of this commit):

from pathlib import Path
import spaces
import gc
from huggingface_hub import hf_hub_download

# Latest and best open-source models
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from diffusers import (
    FluxPipeline,
    FluxControlNetPipeline,
    DDIMScheduler,
    DPMSolverMultistepScheduler
)
import soundfile as sf
import requests

class ProfessionalCartoonFilmGenerator:
    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.temp_dir = tempfile.mkdtemp()

        # Model configurations for ZeroGPU optimization
        self.models_loaded = False
        self.flux_pipe = None
        self.script_enhancer = None

    @spaces.GPU
    def load_models(self):
        """Load state-of-the-art models for professional quality"""
        if self.models_loaded:
            return

        print("Loading professional-grade models...")

        try:
            # 1. FLUX pipeline for superior image generation
            print("Loading FLUX pipeline...")
            self.flux_pipe = FluxPipeline.from_pretrained(
                "black-forest-labs/FLUX.1-dev",
                torch_dtype=torch.bfloat16,
                variant="fp16",
                use_safetensors=True
            ).to(self.device)

            # Load cartoon/anime LoRA for character generation
            print("Loading cartoon LoRA models...")
            try:
                # Load multiple LoRA models for different purposes
                self.cartoon_lora = hf_hub_download(
                    "prithivMLmods/Canopus-LoRA-Flux-Anime",
                    "Canopus-LoRA-Flux-Anime.safetensors"
                )
                self.character_lora = hf_hub_download(
                    "enhanceaiteam/Anime-Flux",
                    "anime-flux.safetensors"
                )
                self.sketch_lora = hf_hub_download(
                    "Shakker-Labs/FLUX.1-dev-LoRA-Children-Simple-Sketch",
                    "FLUX-dev-lora-children-simple-sketch.safetensors"
                )
                print("LoRA models loaded successfully")
            except Exception as e:
                print(f"Some LoRA models failed to load: {e}")

            # Enable memory optimizations
            self.flux_pipe.enable_vae_slicing()
            self.flux_pipe.enable_vae_tiling()

            print("FLUX pipeline loaded successfully")

        except Exception as e:
            print(f"FLUX pipeline failed: {e}")
            self.flux_pipe = None

        try:
            # 2. Advanced script generation model
            print("Loading script enhancement model...")
            self.script_enhancer = pipeline(
                "text-generation",
                model="microsoft/DialoGPT-large",
                torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
                device=0 if self.device == "cuda" else -1
            )
            print("Script enhancer loaded")

        except Exception as e:
            print(f"Script enhancer failed: {e}")
            self.script_enhancer = None

        self.models_loaded = True
        print("All professional models loaded!")

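A quick way to sanity-check the FLUX + LoRA combination outside the Space is sketched below. This is an editor's illustration, not part of the commit: it assumes access to the gated black-forest-labs/FLUX.1-dev weights and a GPU with enough memory, and it omits the `variant="fp16"` flag used above, which may not match the files published for that repo.

```python
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Same repo/file pair that load_models() downloads for the anime style.
lora_path = hf_hub_download(
    "prithivMLmods/Canopus-LoRA-Flux-Anime",
    "Canopus-LoRA-Flux-Anime.safetensors",
)
pipe.load_lora_weights(lora_path)

image = pipe(
    prompt="cartoon character sheet, friendly robot, clean white background",
    num_inference_steps=25,
    guidance_scale=3.5,
    height=1024,
    width=1024,
).images[0]
image.save("character_test.png")
```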
    def clear_gpu_memory(self):
        """Clear GPU memory between operations"""
        # ...
        torch.cuda.empty_cache()
        gc.collect()

    def generate_professional_script(self, user_input: str) -> Dict[str, Any]:
        """Generate a professional cartoon script with detailed character development"""

        # Advanced script analysis
        words = user_input.lower().split()

        # Character analysis
        main_character = self._analyze_main_character(words)
        setting = self._analyze_setting(words)
        theme = self._analyze_theme(words)
        genre = self._analyze_genre(words)
        mood = self._analyze_mood(words)

        # Generate sophisticated character profiles
        characters = self._create_detailed_characters(main_character, theme, genre)

        # Create professional story structure (8 scenes for perfect pacing)
        scenes = self._create_cinematic_scenes(characters, setting, theme, genre, mood, user_input)

        return {
            "title": f"The {theme.title()}: A {genre.title()} Adventure",
            "genre": genre,
            "mood": mood,
            "theme": theme,
            "characters": characters,
            "scenes": scenes,
            "setting": setting,
            "style": f"Professional 2D cartoon animation in {genre} style with cinematic lighting and expressive character animation",
            "color_palette": self._generate_color_palette(mood, genre),
            "animation_notes": f"Focus on {mood} expressions, smooth character movement, and detailed background art"
        }

    def _analyze_main_character(self, words):
        """Sophisticated character analysis"""
        if any(word in words for word in ['girl', 'woman', 'princess', 'heroine', 'daughter', 'sister']):
            return "brave young heroine"
        elif any(word in words for word in ['boy', 'man', 'hero', 'prince', 'son', 'brother']):
            return "courageous young hero"
        elif any(word in words for word in ['robot', 'android', 'cyborg', 'machine', 'ai']):
            return "friendly robot character"
        elif any(word in words for word in ['cat', 'dog', 'fox', 'bear', 'wolf', 'animal']):
            return "adorable animal protagonist"
        elif any(word in words for word in ['dragon', 'fairy', 'wizard', 'witch', 'magic']):
            return "magical creature"
        elif any(word in words for word in ['alien', 'space', 'star', 'galaxy']):
            return "curious alien visitor"
        else:
            return "charming protagonist"

    def _analyze_setting(self, words):
        """Advanced setting analysis"""
        if any(word in words for word in ['forest', 'woods', 'trees', 'jungle', 'nature']):
            return "enchanted forest with mystical atmosphere"
        elif any(word in words for word in ['city', 'town', 'urban', 'street', 'building']):
            return "vibrant bustling city with colorful architecture"
        elif any(word in words for word in ['space', 'stars', 'planet', 'galaxy', 'cosmic']):
            return "spectacular cosmic landscape with nebulae and distant planets"
        elif any(word in words for word in ['ocean', 'sea', 'underwater', 'beach', 'water']):
            return "beautiful underwater world with coral reefs"
        elif any(word in words for word in ['mountain', 'cave', 'valley', 'cliff']):
            return "majestic mountain landscape with dramatic vistas"
        elif any(word in words for word in ['castle', 'kingdom', 'palace', 'medieval']):
            return "magical kingdom with towering castle spires"
        elif any(word in words for word in ['school', 'classroom', 'library', 'study']):
            return "charming school environment with warm lighting"
        else:
            return "wonderfully imaginative fantasy world"

    def _analyze_theme(self, words):
        """Identify story themes"""
        if any(word in words for word in ['friend', 'friendship', 'help', 'together', 'team']):
            return "power of friendship"
        elif any(word in words for word in ['treasure', 'find', 'search', 'discover', 'quest']):
            return "epic treasure quest"
        elif any(word in words for word in ['save', 'rescue', 'protect', 'danger', 'hero']):
            return "heroic rescue mission"
        elif any(word in words for word in ['magic', 'magical', 'spell', 'wizard', 'enchant']):
            return "magical discovery"
        elif any(word in words for word in ['learn', 'grow', 'change', 'journey']):
            return "journey of self-discovery"
        elif any(word in words for word in ['family', 'home', 'parent', 'love']):
            return "importance of family"
        else:
            return "heartwarming adventure"

    def _analyze_genre(self, words):
        """Determine animation genre"""
        if any(word in words for word in ['adventure', 'quest', 'journey', 'explore']):
            return "adventure"
        elif any(word in words for word in ['funny', 'comedy', 'laugh', 'silly', 'humor']):
            return "comedy"
        elif any(word in words for word in ['magic', 'fantasy', 'fairy', 'wizard', 'enchant']):
            return "fantasy"
        elif any(word in words for word in ['space', 'robot', 'future', 'sci-fi', 'technology']):
            return "sci-fi"
        elif any(word in words for word in ['mystery', 'secret', 'solve', 'detective']):
            return "mystery"
        else:
            return "family-friendly"

    def _analyze_mood(self, words):
        """Determine overall mood"""
        if any(word in words for word in ['happy', 'joy', 'fun', 'celebrate', 'party']):
            return "joyful"
        elif any(word in words for word in ['exciting', 'thrill', 'adventure', 'fast']):
            return "exciting"
        elif any(word in words for word in ['peaceful', 'calm', 'gentle', 'quiet']):
            return "peaceful"
        elif any(word in words for word in ['mysterious', 'secret', 'hidden', 'unknown']):
            return "mysterious"
        elif any(word in words for word in ['brave', 'courage', 'strong', 'bold']):
            return "inspiring"
        else:
            return "heartwarming"

    def _create_detailed_characters(self, main_char, theme, genre):
        """Create detailed character profiles"""
        characters = []

        # Main character with detailed description
        main_desc = f"Professional cartoon-style {main_char} with large expressive eyes, detailed facial features, vibrant clothing, Disney-Pixar quality design, {genre} aesthetic, highly detailed"
        characters.append({
            "name": main_char,
            "description": main_desc,
            "personality": f"brave, kind, determined, optimistic, perfect for {theme}",
            "role": "protagonist",
            "animation_style": "lead character quality with detailed expressions"
        })

        # Supporting character
        support_desc = f"Charming cartoon companion with warm personality, detailed character design, complementary colors to main character, {genre} style, supporting role"
        characters.append({
            "name": "loyal companion",
            "description": support_desc,
            "personality": "wise, encouraging, helpful, comic relief",
            "role": "supporting",
            "animation_style": "high-quality supporting character design"
        })

        # Optional antagonist for conflict
        if theme in ["heroic rescue mission", "epic treasure quest"]:
            antag_desc = f"Cartoon antagonist with distinctive design, not too scary for family audience, {genre} villain aesthetic, detailed character work"
            characters.append({
                "name": "misguided opponent",
                "description": antag_desc,
                "personality": "misunderstood, redeemable, provides conflict",
                "role": "antagonist",
                "animation_style": "memorable villain design"
            })

        return characters

    def _create_cinematic_scenes(self, characters, setting, theme, genre, mood, user_input):
        """Create cinematically structured scenes"""

        main_char = characters[0]["name"]
        companion = characters[1]["name"] if len(characters) > 1 else "friend"

        # Professional scene templates with cinematic structure
        scene_templates = [
            {
                "title": "Opening - World Introduction",
                "description": f"Establish the {setting} and introduce our {main_char} in their daily life",
                "purpose": "world-building and character introduction",
                "shot_type": "wide establishing shot transitioning to character focus"
            },
            {
                "title": "Inciting Incident",
                "description": f"The {main_char} discovers the central challenge of {theme}",
                "purpose": "plot catalyst and character motivation",
                "shot_type": "close-up on character reaction, dramatic lighting"
            },
            {
                "title": "Call to Adventure",
                "description": f"Meeting the {companion} and deciding to embark on the journey",
                "purpose": "relationship building and commitment to quest",
                "shot_type": "medium shots showing character interaction"
            },
            {
                "title": "First Challenge",
                "description": f"Encountering the first obstacle in their {theme} journey",
                "purpose": "establish stakes and character growth",
                "shot_type": "dynamic action shots with dramatic angles"
            },
            {
                "title": "Moment of Doubt",
                "description": f"The {main_char} faces setbacks and questions their ability",
                "purpose": "character vulnerability and emotional depth",
                "shot_type": "intimate character shots with emotional lighting"
            },
            {
                "title": "Renewed Determination",
                "description": f"With support from {companion}, finding inner strength",
                "purpose": "character development and relationship payoff",
                "shot_type": "inspiring medium shots with uplifting composition"
            },
            {
                "title": "Climactic Confrontation",
                "description": f"The final challenge of the {theme} reaches its peak",
                "purpose": "climax and character triumph",
                "shot_type": "epic wide shots and dynamic action sequences"
            },
            {
                "title": "Resolution and Growth",
                "description": f"Celebrating success and reflecting on growth in {setting}",
                "purpose": "satisfying conclusion and character arc completion",
                "shot_type": "warm, celebratory shots returning to establishing setting"
            }
        ]

        scenes = []
        for i, template in enumerate(scene_templates):
            lighting = ["golden hour sunrise", "bright daylight", "warm afternoon", "dramatic twilight",
                        "moody evening", "hopeful dawn", "epic sunset", "peaceful twilight"][i]

            scenes.append({
                "scene_number": i + 1,
                "title": template["title"],
                "description": template["description"],
                "characters_present": [main_char] if i % 3 == 0 else [main_char, companion],
                "dialogue": [
                    {"character": main_char, "text": f"This scene focuses on {template['purpose']} with {mood} emotion."}
                ],
                "background": f"{setting} with {lighting} lighting, cinematic composition",
                "mood": mood,
                "duration": "35",  # Slightly longer for better pacing
                "shot_type": template["shot_type"],
                "animation_notes": f"Focus on {template['purpose']} with professional character animation"
            })

        return scenes

    def _generate_color_palette(self, mood, genre):
        """Generate appropriate color palette"""
        palettes = {
            "joyful": "bright yellows, warm oranges, sky blues, fresh greens",
            "exciting": "vibrant reds, electric blues, energetic purples, bright whites",
            "peaceful": "soft pastels, gentle greens, calming blues, warm creams",
            "mysterious": "deep purples, twilight blues, shadowy grays, moonlight silver",
            "inspiring": "bold blues, confident reds, golden yellows, pure whites"
        }
        return palettes.get(mood, "balanced warm and cool tones")

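Because `generate_professional_script` and its `_analyze_*` helpers are plain keyword matching, the script structure can be inspected without loading any models. A minimal usage sketch (editor's illustration, assuming the class above is importable):

```python
gen = ProfessionalCartoonFilmGenerator()
script_data = gen.generate_professional_script(
    "A brave young girl discovers a magical forest where talking animals need her help"
)
print(script_data["title"], "|", script_data["genre"], "|", script_data["mood"])
for scene in script_data["scenes"]:
    print(scene["scene_number"], scene["title"], "->", scene["shot_type"])
```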
    @spaces.GPU
    def generate_professional_character_images(self, characters: List[Dict]) -> Dict[str, str]:
        """Generate high-quality character images using FLUX + LoRA"""
        self.load_models()
        character_images = {}

        if not self.flux_pipe:
            print("FLUX pipeline not available")
            return character_images

        for character in characters:
            try:
                print(f"Generating professional character: {character['name']}")

                # Load appropriate LoRA based on character type
                if "anime" in character.get("animation_style", "").lower():
                    if hasattr(self, 'cartoon_lora'):
                        self.flux_pipe.load_lora_weights(self.cartoon_lora)

                # Professional character prompt
                prompt = f"""
                anime style, professional cartoon character design, {character['description']},
                character sheet style, multiple poses reference, clean white background,
                2D animation model sheet, Disney-Pixar quality, highly detailed,
                consistent character design, expressive face, perfect for animation,
                {character.get('animation_style', 'high-quality character design')}
                """

                negative_prompt = """
                realistic, 3D render, dark, scary, inappropriate, low quality, blurry,
                inconsistent, amateur, simple, crude, manga, sketch
                """

                image = self.flux_pipe(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    num_inference_steps=25,  # High quality steps
                    guidance_scale=3.5,
                    height=1024,  # High resolution
                    width=1024,
                    max_sequence_length=256
                ).images[0]

                char_path = f"{self.temp_dir}/character_{character['name'].replace(' ', '_')}.png"
                image.save(char_path)
                character_images[character['name']] = char_path
                print(f"Generated high-quality character: {character['name']}")

                self.clear_gpu_memory()

            # ...
        return character_images

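One knob the method above does not use is a fixed random seed; passing a `generator` keeps a character's look reproducible across regenerations. Editor's sketch, where `pipe` stands for the FluxPipeline loaded in `load_models()`:

```python
import torch

g = torch.Generator(device="cuda").manual_seed(42)
image = pipe(
    prompt="cartoon character sheet, brave young heroine, clean white background",
    num_inference_steps=25,
    guidance_scale=3.5,
    height=1024,
    width=1024,
    generator=g,
).images[0]
```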
    @spaces.GPU
    def generate_cinematic_backgrounds(self, scenes: List[Dict], color_palette: str) -> Dict[int, str]:
        """Generate cinematic background images for each scene"""
        self.load_models()
        background_images = {}

        if not self.flux_pipe:
            print("FLUX pipeline not available")
            return background_images

        for scene in scenes:
            try:
                print(f"Creating cinematic background for scene {scene['scene_number']}")

                prompt = f"""
                Professional cartoon background art, {scene['background']},
                {scene['mood']} atmosphere, {color_palette} color palette,
                cinematic composition, {scene.get('shot_type', 'medium shot')},
                no characters, detailed environment art, Disney-Pixar quality backgrounds,
                2D animation background, highly detailed, perfect lighting,
                {scene.get('animation_notes', 'professional background art')}
                """

                negative_prompt = """
                characters, people, animals, realistic, dark, scary, low quality,
                blurry, simple, amateur, 3D render
                """

                image = self.flux_pipe(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    num_inference_steps=20,
                    guidance_scale=3.0,
                    height=768,  # 4:3 aspect ratio for traditional animation
                    width=1024,
                    max_sequence_length=256
                ).images[0]

                bg_path = f"{self.temp_dir}/background_scene_{scene['scene_number']}.png"
                image.save(bg_path)
                background_images[scene['scene_number']] = bg_path
                print(f"Created cinematic background for scene {scene['scene_number']}")

                self.clear_gpu_memory()
            # ...

        return background_images

    def setup_opensora_for_video(self):
        """Setup Open-Sora for professional video generation"""
        try:
            print("Setting up Open-Sora 2.0 for video generation...")

            # Clone Open-Sora repository
            if not os.path.exists("Open-Sora"):
                subprocess.run([
                    "git", "clone", "https://github.com/hpcaitech/Open-Sora.git"
                ], check=True, capture_output=True)

            os.chdir("Open-Sora")

            # Download model weights
            print("Downloading Open-Sora 2.0 model...")
            subprocess.run([
                "huggingface-cli", "download", "hpcai-tech/Open-Sora-v2",
                "--local-dir", "./ckpts"
            ], check=True, capture_output=True)

            return True

        except Exception as e:
            print(f"Open-Sora setup failed: {e}")
            return False

    @spaces.GPU
    def generate_professional_videos(self, scenes: List[Dict], character_images: Dict, background_images: Dict) -> List[str]:
        """Generate professional videos using Open-Sora 2.0"""
        scene_videos = []

        # Try to use Open-Sora for professional video generation
        opensora_available = self.setup_opensora_for_video()

        for scene in scenes:
            try:
                if opensora_available:
                    video_path = self._generate_opensora_video(scene, character_images, background_images)
                else:
                    # Fallback to enhanced static video
                    video_path = self._create_professional_static_video(scene, background_images)

                if video_path:
                    scene_videos.append(video_path)
                    print(f"Generated professional video for scene {scene['scene_number']}")

            except Exception as e:
                print(f"Error in scene {scene['scene_number']}: {e}")
                # Create fallback video
                if scene['scene_number'] in background_images:
                    video_path = self._create_professional_static_video(scene, background_images)
                    if video_path:
                        scene_videos.append(video_path)

        return scene_videos

    def _generate_opensora_video(self, scene: Dict, character_images: Dict, background_images: Dict) -> str:
        """Generate video using Open-Sora 2.0"""
        try:
            characters_text = ", ".join(scene['characters_present'])

            # Professional prompt for Open-Sora
            prompt = f"""
            Professional 2D cartoon animation, {characters_text} in {scene['background']},
            {scene['mood']} mood, {scene.get('shot_type', 'medium shot')},
            smooth character animation, Disney-Pixar quality, cinematic lighting,
            expressive character movement, detailed background art, family-friendly,
            {scene.get('animation_notes', 'high-quality animation')}
            """

            video_path = f"{self.temp_dir}/scene_{scene['scene_number']}.mp4"

            # Run Open-Sora inference
            cmd = [
                "torchrun", "--nproc_per_node", "1", "--standalone",
                "scripts/diffusion/inference.py",
                "configs/diffusion/inference/t2i2v_256px.py",
                "--save-dir", self.temp_dir,
                "--prompt", prompt,
                "--num_frames", "25",  # ~1 second at 25fps
                "--aspect_ratio", "4:3",
                "--motion-score", "6"  # High motion for dynamic scenes
            ]

            result = subprocess.run(cmd, capture_output=True, text=True, cwd="Open-Sora")

            if result.returncode == 0:
                # Find generated video file
                for file in os.listdir(self.temp_dir):
                    if file.endswith('.mp4') and 'scene' not in file:
                        src_path = os.path.join(self.temp_dir, file)
                        os.rename(src_path, video_path)
                        return video_path

            return None

        except Exception as e:
            print(f"Open-Sora generation failed: {e}")
            return None

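The file-discovery step above renames the first `.mp4` without "scene" in its name, which can grab a stale file if the temp directory is reused. A slightly more defensive variant (editor's sketch; `self.temp_dir` and `video_path` refer to the variables in the method above) picks the newest candidate instead:

```python
import glob
import os

candidates = [
    p for p in glob.glob(os.path.join(self.temp_dir, "*.mp4"))
    if "scene" not in os.path.basename(p)
]
if candidates:
    newest = max(candidates, key=os.path.getmtime)
    os.rename(newest, video_path)
```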
    def _create_professional_static_video(self, scene: Dict, background_images: Dict) -> str:
        """Create professional static video with advanced effects"""
        if scene['scene_number'] not in background_images:
            return None

        video_path = f"{self.temp_dir}/scene_{scene['scene_number']}.mp4"

        try:
            # Load background image
            image = Image.open(background_images[scene['scene_number']])
            img_array = np.array(image.resize((1024, 768)))  # 4:3 aspect ratio
            img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)

            # Professional video settings
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            fps = 24  # Cinematic frame rate
            duration = int(scene.get('duration', 35))
            total_frames = duration * fps

            out = cv2.VideoWriter(video_path, fourcc, fps, (1024, 768))

            # Advanced animation effects based on scene mood and type
            for i in range(total_frames):
                frame = img_array.copy()
                progress = i / total_frames

                # Apply professional animation effects
                frame = self._apply_cinematic_effects(frame, scene, progress)
                out.write(frame)

            out.release()
            return video_path

        except Exception as e:
            print(f"Professional static video creation failed: {e}")
            return None

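The same still-image animation can be exercised on its own with a short clip. The sketch below is an editor's illustration (with a hypothetical `background.png`) that mirrors the gentle push-in branch of the effect logic at 24 fps:

```python
import cv2
import numpy as np
from PIL import Image

img = cv2.cvtColor(
    np.array(Image.open("background.png").resize((1024, 768))), cv2.COLOR_RGB2BGR
)
out = cv2.VideoWriter("zoom_test.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, (1024, 768))
for i in range(48):  # 2 seconds at 24 fps
    scale = 1.0 + (i / 48) * 0.08  # gentle zoom in, as in the close-up branch below
    M = cv2.getRotationMatrix2D((512, 384), 0, scale)
    out.write(cv2.warpAffine(img, M, (1024, 768)))
out.release()
```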
+
def _apply_cinematic_effects(self, frame, scene, progress):
|
| 595 |
+
"""Apply professional cinematic effects"""
|
| 596 |
+
h, w = frame.shape[:2]
|
| 597 |
+
|
| 598 |
+
# Choose effect based on scene mood and type
|
| 599 |
+
mood = scene.get('mood', 'heartwarming')
|
| 600 |
+
shot_type = scene.get('shot_type', 'medium shot')
|
| 601 |
+
|
| 602 |
+
if 'establishing' in shot_type:
|
| 603 |
+
# Slow zoom out for establishing shots
|
| 604 |
+
scale = 1.15 - progress * 0.1
|
| 605 |
+
center_x, center_y = w // 2, h // 2
|
| 606 |
+
M = cv2.getRotationMatrix2D((center_x, center_y), 0, scale)
|
| 607 |
+
frame = cv2.warpAffine(frame, M, (w, h))
|
| 608 |
+
|
| 609 |
+
elif 'close-up' in shot_type:
|
| 610 |
+
# Gentle zoom in for emotional moments
|
| 611 |
+
scale = 1.0 + progress * 0.08
|
| 612 |
+
center_x, center_y = w // 2, h // 2
|
| 613 |
+
M = cv2.getRotationMatrix2D((center_x, center_y), 0, scale)
|
| 614 |
+
frame = cv2.warpAffine(frame, M, (w, h))
|
| 615 |
+
|
| 616 |
+
elif mood == 'exciting':
|
| 617 |
+
# Dynamic camera movement
|
| 618 |
+
shift_x = int(np.sin(progress * 4 * np.pi) * 8)
|
| 619 |
+
shift_y = int(np.cos(progress * 2 * np.pi) * 4)
|
| 620 |
+
M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
|
| 621 |
+
frame = cv2.warpAffine(frame, M, (w, h))
|
| 622 |
+
|
| 623 |
+
elif mood == 'peaceful':
|
| 624 |
+
# Gentle floating motion
|
| 625 |
+
shift_y = int(np.sin(progress * 2 * np.pi) * 6)
|
| 626 |
+
M = np.float32([[1, 0, 0], [0, 1, shift_y]])
|
| 627 |
+
frame = cv2.warpAffine(frame, M, (w, h))
|
| 628 |
+
|
| 629 |
+
elif mood == 'mysterious':
|
| 630 |
+
# Subtle rotation and zoom
|
| 631 |
+
angle = np.sin(progress * np.pi) * 2
|
| 632 |
+
scale = 1.0 + np.sin(progress * np.pi) * 0.05
|
| 633 |
+
center_x, center_y = w // 2, h // 2
|
| 634 |
+
M = cv2.getRotationMatrix2D((center_x, center_y), angle, scale)
|
| 635 |
+
frame = cv2.warpAffine(frame, M, (w, h))
|
| 636 |
+
|
| 637 |
+
return frame
|
| 638 |
+
|
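Since `_create_professional_static_video` feeds every returned frame straight into `cv2.VideoWriter`, the effect helper has to preserve the frame's shape and dtype. A quick check (editor's sketch; `gen` is an instance of the class above):

```python
import numpy as np

dummy = np.zeros((768, 1024, 3), dtype=np.uint8)
scene = {"mood": "exciting", "shot_type": "medium shot"}
frame = gen._apply_cinematic_effects(dummy, scene, progress=0.5)
assert frame.shape == dummy.shape and frame.dtype == dummy.dtype
```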
    def merge_professional_film(self, scene_videos: List[str], script_data: Dict) -> str:
        """Merge videos into professional cartoon film"""
        if not scene_videos:
            print("No videos to merge")
            return None

        final_video_path = f"{self.temp_dir}/professional_cartoon_film.mp4"

        try:
            print("Creating professional cartoon film...")

            # Create concat file
            concat_file = f"{self.temp_dir}/concat_list.txt"
            # ...
                if os.path.exists(video):
                    f.write(f"file '{os.path.abspath(video)}'\n")

            # Professional video encoding with high quality
            cmd = [
                'ffmpeg', '-f', 'concat', '-safe', '0', '-i', concat_file,
                '-c:v', 'libx264',
                '-preset', 'slow',  # Higher quality encoding
                '-crf', '18',  # High quality (lower = better)
                '-pix_fmt', 'yuv420p',
                '-r', '24',  # Cinematic frame rate
                '-y', final_video_path
            ]

            result = subprocess.run(cmd, capture_output=True, text=True)
            if result.returncode == 0:
                print("Professional cartoon film created successfully")
                return final_video_path
            else:
                print(f"FFmpeg error: {result.stderr}")
            # ...
            return None

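The merge step relies on ffmpeg's concat demuxer: the list file written above contains one `file '<absolute path>'` line per scene, and `-safe 0` is what permits those absolute paths. A standalone sketch of the same flow with hypothetical paths:

```python
scene_videos = ["/tmp/demo/scene_1.mp4", "/tmp/demo/scene_2.mp4"]
with open("/tmp/demo/concat_list.txt", "w") as f:
    for video in scene_videos:
        f.write(f"file '{video}'\n")
# Then, mirroring the flags used above:
#   ffmpeg -f concat -safe 0 -i /tmp/demo/concat_list.txt \
#          -c:v libx264 -preset slow -crf 18 -pix_fmt yuv420p -r 24 -y film.mp4
```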
    @spaces.GPU
    def generate_professional_cartoon_film(self, script: str) -> tuple:
        """Main function to generate professional-quality cartoon film"""
        try:
            print("Starting professional cartoon film generation...")

            # Step 1: Generate professional script
            print("Creating professional script structure...")
            script_data = self.generate_professional_script(script)

            # Step 2: Generate high-quality characters
            print("Creating professional character designs...")
            character_images = self.generate_professional_character_images(script_data['characters'])

            # Step 3: Generate cinematic backgrounds
            print("Creating cinematic backgrounds...")
            background_images = self.generate_cinematic_backgrounds(
                script_data['scenes'],
                script_data['color_palette']
            )

            # Step 4: Generate professional videos
            print("Creating professional animated scenes...")
            scene_videos = self.generate_professional_videos(
                script_data['scenes'],
                character_images,
                background_images
            )

            # Step 5: Merge into professional film
            print("Creating final professional cartoon film...")
            final_video = self.merge_professional_film(scene_videos, script_data)

            if final_video and os.path.exists(final_video):
                print("Professional cartoon film generation complete!")
                return final_video, script_data, "Professional cartoon film generated successfully!"
            else:
                print("Partial success - some components may be missing")
                return None, script_data, "Generation completed with some issues"

        except Exception as e:
            print(f"Generation failed: {e}")
            # ...
            }
            return None, error_info, f"Generation failed: {str(e)}"

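The whole pipeline can also be driven without the Gradio UI, which is handy for debugging individual stages. Editor's usage sketch (same GPU and model-access requirements as the Space itself):

```python
gen = ProfessionalCartoonFilmGenerator()
video_path, script_data, status = gen.generate_professional_cartoon_film(
    "A curious robot befriends a lonely child in a futuristic city"
)
print(status)
if video_path:
    print("Final film written to:", video_path)
```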
# Initialize professional generator
generator = ProfessionalCartoonFilmGenerator()

@spaces.GPU
def create_professional_cartoon_film(script):
    """Gradio interface function for professional generation"""
    if not script.strip():
        empty_response = {
            "error": True,
            # ...
        }
        return None, empty_response, "Please enter a script"

    return generator.generate_professional_cartoon_film(script)

# Professional Gradio Interface
with gr.Blocks(
    title="Professional AI Cartoon Film Generator",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {
        max-width: 1400px !important;
    }
    .hero-section {
        text-align: center;
        padding: 2rem;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border-radius: 10px;
        margin-bottom: 2rem;
    }
    """
) as demo:

    with gr.Column(elem_classes="hero-section"):
        gr.Markdown("""
        # Professional AI Cartoon Film Generator
        ## **FLUX + LoRA + Open-Sora 2.0 = Disney-Quality Results**

        Transform your story into a **professional 5-minute cartoon film** using the latest AI models!
        """)

    gr.Markdown("""
    ## **Revolutionary Upgrade - Professional Quality**

    **Latest AI Models:**
    - **FLUX + LoRA** - Disney-Pixar quality character generation
    - **Open-Sora 2.0** - State-of-the-art video generation (11B parameters)
    - **Professional Script Generation** - Cinematic story structure
    - **Cinematic Animation** - Professional camera movements and effects

    **Features:**
    - **8 professionally structured scenes** with cinematic pacing
    - **High-resolution characters** (1024x1024) with consistent design
    - **Cinematic backgrounds** with professional lighting
    - **Advanced animation effects** based on scene mood
    - **4K video output** with 24fps cinematic quality

    **Perfect for:**
    - Content creators seeking professional results
    - Filmmakers prototyping animated concepts
    - Educators creating engaging educational content
    - Anyone wanting Disney-quality cartoon films
    """)

    with gr.Row():
        with gr.Column(scale=1):
            script_input = gr.Textbox(
                label="Your Story Script",
                placeholder="""Enter your story idea! Be descriptive for best results:

Examples:
• A brave young girl discovers a magical forest where talking animals need her help to save their home from an evil wizard who has stolen all the colors from their world.

• A curious robot living in a futuristic city learns about human emotions when it befriends a lonely child and together they solve the mystery of the disappearing laughter.

• Two unlikely friends - a shy dragon and a brave knight - must work together to protect their kingdom from a misunderstood monster while learning that appearances can be deceiving.

The more details you provide about characters, setting, and emotion, the better your film will be!""",
                lines=8,
                max_lines=12
            )

            generate_btn = gr.Button(
                "Generate Professional Cartoon Film",
                variant="primary",
                size="lg"
            )

            gr.Markdown("""
            **Processing Time:** 8-12 minutes
            **Output:** 5-minute professional MP4 film
            **Quality:** Disney-Pixar level animation
            **Resolution:** 1024x768 (4:3 cinematic)
            """)

        with gr.Column(scale=1):
            video_output = gr.Video(
                label="Professional Cartoon Film",
                height=500
            )

            status_output = gr.Textbox(
                label="Generation Status",
                lines=3
            )

            script_details = gr.JSON(
                label="Professional Script Analysis",
                visible=True
            )

    # Event handlers
    generate_btn.click(
        fn=create_professional_cartoon_film,
        inputs=[script_input],
        outputs=[video_output, script_details, status_output],
        show_progress=True
    )

    # Professional example scripts
    gr.Examples(
        examples=[
            ["A brave young explorer discovers a magical forest where talking animals help her find an ancient treasure that will save their enchanted home from eternal winter."],
            ["Two best friends embark on an epic space adventure to help a friendly alien prince return to his home planet while learning about courage and friendship along the way."],
            ["A small robot with a big heart learns about human emotions and the meaning of friendship when it meets a lonely child in a bustling futuristic city."],
            ["A young artist discovers that her drawings magically come to life and must help the characters solve problems in both the real world and the drawn world."],
            ["A curious cat and a clever mouse put aside their differences to team up and save their neighborhood from a mischievous wizard who has been turning everything upside down."],
            ["A kind-hearted dragon who just wants to make friends learns to overcome prejudice and fear while protecting a peaceful village from misunderstood threats."],
            ["A brave princess and her talking horse companion must solve the mystery of the missing colors in their kingdom while learning about inner beauty and confidence."],
            ["Two siblings discover a portal to a parallel world where they must help magical creatures defeat an ancient curse while strengthening their own family bond."]
        ],
        inputs=[script_input],
        label="Try these professional example stories:"
    )

    gr.Markdown("""
    ---
    ## **Professional Technology Stack**

    **Image Generation:**
    - **FLUX.1-dev** - State-of-the-art diffusion model
    - **Anime/Cartoon LoRA** - Specialized character training
    - **Professional prompting** - Disney-quality character sheets

    **Video Generation:**
    - **Open-Sora 2.0** - 11B parameter video model
    - **Cinematic camera movements** - Professional animation effects
    - **24fps output** - Industry-standard frame rate

    **Script Enhancement:**
    - **Advanced story analysis** - Character, setting, theme detection
    - **Cinematic structure** - Professional 8-scene format
    - **Character development** - Detailed personality profiles

    **Quality Features:**
    - **Consistent character design** - Using LoRA fine-tuning
    - **Professional color palettes** - Mood-appropriate schemes
    - **Cinematic composition** - Shot types and camera angles
    - **High-resolution output** - 4K-ready video files

    ## **Character & Scene Quality**

    **Characters:**
    - Disney-Pixar quality design
    - Consistent appearance across scenes
    - Expressive facial features
    - Professional character sheets

    **Backgrounds:**
    - Cinematic lighting and composition
    - Detailed environment art
    - Mood-appropriate color schemes
    - Professional background painting quality

    **Animation:**
    - Smooth camera movements
    - Scene-appropriate effects
    - Professional timing and pacing
    - Cinematic transitions

    **Completely free and open source!** Using only the latest and best AI models.
    """)

if __name__ == "__main__":
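The diff view cuts off after the `if __name__ == "__main__":` line; the launch call itself is not shown in this commit. A typical Gradio Spaces entry point would end along these lines (assumed, not taken from the file):

```python
if __name__ == "__main__":
    demo.launch()
```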