Chroma-Extra / app.py
gokaygokay's picture
Create app.py
a7438d2 verified
raw
history blame
2.89 kB
import gradio as gr
from llm_inference_video import VideoLLMInferenceNode
import random
# HTML banner rendered at the top of the UI (passed to gr.HTML inside
# create_video_interface).
title = """<h1 align="center">CineGen: AI Video Prompt Architect</h1>
<p align="center">Generate cinematic video prompts with technical specifications</p>"""
def create_video_interface():
    """Assemble the CineGen Gradio UI.

    Builds the two-column control panel, wires the generate button to
    ``VideoLLMInferenceNode.generate_video_prompt``, and returns the
    ``gr.Blocks`` demo for the caller to launch.
    """
    node = VideoLLMInferenceNode()

    def _dropdown(options, default, label):
        # Shared factory for the fixed-choice controls below.
        return gr.Dropdown(choices=options, value=default, label=label)

    with gr.Blocks(theme='bethecloud/storj_theme') as demo:
        gr.HTML(title)

        with gr.Row():
            with gr.Column(scale=2):
                concept_box = gr.Textbox(label="Core Concept/Thematic Input", lines=3)
                duration_slider = gr.Slider(15, 600, value=60, label="Duration (seconds)")
                style_dd = _dropdown(
                    ["Cinematic", "Documentary", "Animation", "Action", "Experimental"],
                    "Cinematic",
                    "Video Style",
                )
                camera_dd = _dropdown(
                    ["Steadicam flow", "Drone aerials", "Handheld urgency", "Crane elegance",
                     "Dolly precision", "VR 360", "Multi-angle rig"],
                    "Steadicam flow",
                    "Camera Movement Style",
                )

            with gr.Column(scale=2):
                pacing_dd = _dropdown(
                    ["Slow burn", "Rhythmic pulse", "Frantic energy", "Ebb and flow", "Hypnotic drift"],
                    "Rhythmic pulse",
                    "Pacing Rhythm",
                )
                sfx_dd = _dropdown(
                    ["Practical effects", "CGI enhancement", "Analog glitches",
                     "Light painting", "Projection mapping", "Nanosecond exposures"],
                    "Practical effects",
                    "SFX Approach",
                )
                custom_box = gr.Textbox(label="Custom Technical Elements",
                                        placeholder="e.g., Infrared hybrid, Datamosh transitions")
                provider_dd = _dropdown(
                    ["Hugging Face", "Groq", "SambaNova"],
                    "Hugging Face",
                    "LLM Provider",
                )
                # NOTE(review): model list is free-form here; presumably the
                # provider dropdown narrows valid models elsewhere — confirm.
                model_dd = gr.Dropdown(label="Model", value="meta-llama/Meta-Llama-3.1-70B-Instruct")

        generate_btn = gr.Button("Generate Video Blueprint", variant="primary")
        output_box = gr.Textbox(label="Video Production Prompt", lines=12, interactive=False)

        # Input order must match generate_video_prompt's parameter order.
        generate_btn.click(
            node.generate_video_prompt,
            inputs=[concept_box, duration_slider, style_dd, camera_dd, pacing_dd,
                    sfx_dd, custom_box, provider_dd, model_dd],
            outputs=output_box,
        )

    return demo
if __name__ == "__main__":
    # Build the UI and serve it; share=True also exposes a public gradio.live URL.
    create_video_interface().launch(share=True)