import gradio as gr
from llm_inference_video import VideoLLMInferenceNode
import random
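
# VideoLLMInferenceNode lives in the sibling llm_inference_video.py module and is
# assumed to wrap the SambaNova / Groq chat APIs used by the interface below
# (see the interface sketch at the end of this file).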
title = """<h1 align="center">CineGen: AI Video Prompt Architect</h1>
<p align="center">Generate cinematic video prompts with technical specifications</p>"""
def create_video_interface():
    llm_node = VideoLLMInferenceNode()

    with gr.Blocks(theme='bethecloud/storj_theme') as demo:
        gr.HTML(title)

        with gr.Row():
            with gr.Column(scale=2):
                input_concept = gr.Textbox(label="Core Concept/Thematic Input", lines=3)
                style = gr.Dropdown(
                    choices=["Cinematic", "Documentary", "Animation", "Action", "Experimental"],
                    value="Cinematic",
                    label="Video Style"
                )
                camera_style = gr.Dropdown(
                    choices=["Steadicam flow", "Drone aerials", "Handheld urgency", "Crane elegance",
                             "Dolly precision", "VR 360", "Multi-angle rig"],
                    value="Steadicam flow",
                    label="Camera Movement Style"
                )
            with gr.Column(scale=2):
                pacing = gr.Dropdown(
                    choices=["Slow burn", "Rhythmic pulse", "Frantic energy", "Ebb and flow", "Hypnotic drift"],
                    value="Rhythmic pulse",
                    label="Pacing Rhythm"
                )
                special_effects = gr.Dropdown(
                    choices=["Practical effects", "CGI enhancement", "Analog glitches",
                             "Light painting", "Projection mapping", "Nanosecond exposures"],
                    value="Practical effects",
                    label="SFX Approach"
                )

        # Free-text field for extra technical directions, sharing a row with the
        # provider/model selectors.
        with gr.Row():
            custom_elements = gr.Textbox(
                label="Custom Technical Elements",
                placeholder="e.g., Infrared hybrid, Datamosh transitions",
                scale=3
            )
            with gr.Column(scale=1):
                provider = gr.Dropdown(
                    choices=["SambaNova", "Groq"],
                    value="SambaNova",
                    label="LLM Provider"
                )
                model = gr.Dropdown(
                    choices=[
                        "Meta-Llama-3.1-70B-Instruct",
                        "Meta-Llama-3.1-405B-Instruct",
                        "Meta-Llama-3.1-8B-Instruct"
                    ],
                    value="Meta-Llama-3.1-70B-Instruct",
                    label="Model"
                )

        generate_btn = gr.Button("Generate Video Prompt", variant="primary")
        output = gr.Textbox(label="Generated Prompt", lines=12, show_copy_button=True)
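
        # The available models depend on the selected provider. Returning a
        # gr.Dropdown(...) from the change handler updates the choices and value
        # of the bound `model` dropdown in place (Gradio 4.x component-update style).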
        def update_models(provider):
            models = {
                "Groq": ["llama-3.3-70b-versatile"],
                "SambaNova": [
                    "Meta-Llama-3.1-70B-Instruct",
                    "Meta-Llama-3.1-405B-Instruct",
                    "Meta-Llama-3.1-8B-Instruct"
                ]
            }
            return gr.Dropdown(choices=models[provider], value=models[provider][0])

        provider.change(update_models, inputs=provider, outputs=model)

        generate_btn.click(
            llm_node.generate_video_prompt,
            inputs=[input_concept, style, camera_style, pacing, special_effects, custom_elements, provider, model],
            outputs=output
        )

    return demo

if __name__ == "__main__":
    demo = create_video_interface()
    demo.launch(share=True)
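
# Assumed interface of llm_inference_video.VideoLLMInferenceNode: a sketch of what
# the click handler above expects (eight inputs, one generated prompt string),
# not the actual implementation.
#
#     class VideoLLMInferenceNode:
#         def generate_video_prompt(self, input_concept, style, camera_style,
#                                    pacing, special_effects, custom_elements,
#                                    provider, model) -> str:
#             ...  # build the prompt and query the selected provider/model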