Chroma-Extra / app.py
gokaygokay's picture
fix models
8c8a286
raw
history blame
5.96 kB
import gradio as gr
from llm_inference_video import VideoLLMInferenceNode
import random
# HTML banner injected at the top of the Blocks UI via gr.HTML(title).
title = """<h1 align="center">CineGen: AI Video Prompt Architect</h1>
<p align="center">Generate cinematic video prompts with technical specifications</p>"""
def create_video_interface():
    """Build and return the CineGen Gradio Blocks UI.

    Lays out the prompt-design controls (concept, style, camera, pacing,
    SFX, custom elements), the LLM provider/model selectors, and wires the
    generate button to ``VideoLLMInferenceNode.generate_video_prompt``.

    Returns:
        gr.Blocks: the assembled (not yet launched) Gradio app.
    """
    llm_node = VideoLLMInferenceNode()

    with gr.Blocks(theme='bethecloud/storj_theme') as demo:
        gr.HTML(title)

        with gr.Row():
            with gr.Column(scale=2):
                input_concept = gr.Textbox(label="Core Concept/Thematic Input", lines=3)
                style = gr.Dropdown(
                    choices=["Minimalist", "Simple", "Detailed", "Descriptive", "Dynamic",
                             "Cinematic", "Documentary", "Animation", "Action", "Experimental"],
                    value="Simple",
                    label="Video Style"
                )
                camera_style = gr.Dropdown(
                    choices=[
                        "None",
                        "Steadicam flow", "Drone aerials", "Handheld urgency", "Crane elegance",
                        "Dolly precision", "VR 360", "Multi-angle rig", "Static tripod",
                        "Gimbal smoothness", "Slider motion", "Jib sweep", "POV immersion",
                        "Time-slice array", "Macro extreme", "Tilt-shift miniature",
                        "Snorricam character", "Whip pan dynamics", "Dutch angle tension",
                        "Underwater housing", "Periscope lens"
                    ],
                    value="None",
                    label="Camera Movement Style"
                )
                camera_direction = gr.Dropdown(
                    choices=[
                        "None",
                        "Zoom in", "Zoom out", "Pan left", "Pan right",
                        "Tilt up", "Tilt down", "Orbital rotation",
                        "Push in", "Pull out", "Track forward", "Track backward",
                        "Spiral in", "Spiral out", "Arc movement",
                        "Diagonal traverse", "Vertical rise", "Vertical descent"
                    ],
                    value="None",
                    label="Camera Direction"
                )

            with gr.Column(scale=2):
                pacing = gr.Dropdown(
                    choices=[
                        "None",
                        "Slow burn", "Rhythmic pulse", "Frantic energy", "Ebb and flow",
                        "Hypnotic drift", "Time-lapse rush", "Stop-motion staccato",
                        "Gradual build", "Quick cut rhythm", "Long take meditation",
                        "Jump cut energy", "Match cut flow", "Cross-dissolve dreamscape",
                        "Parallel action", "Slow motion impact", "Ramping dynamics",
                        "Montage tempo", "Continuous flow", "Episodic breaks"
                    ],
                    value="None",
                    label="Pacing Rhythm"
                )
                special_effects = gr.Dropdown(
                    choices=[
                        "None",
                        "Practical effects", "CGI enhancement", "Analog glitches",
                        "Light painting", "Projection mapping", "Nanosecond exposures",
                        "Double exposure", "Smoke diffusion", "Lens flare artistry",
                        "Particle systems", "Holographic overlay", "Chromatic aberration",
                        "Digital distortion", "Wire removal", "Motion capture",
                        "Miniature integration", "Weather simulation", "Color grading",
                        "Mixed media composite", "Neural style transfer"
                    ],
                    value="None",
                    label="SFX Approach"
                )
                # BUGFIX: the original defined `custom_elements` twice (once here,
                # once in the provider row below); the second assignment shadowed
                # this one, leaving a dead widget on screen. Keep a single Textbox.
                custom_elements = gr.Textbox(
                    label="Custom Technical Elements",
                    placeholder="e.g., Infrared hybrid, Datamosh transitions"
                )

        with gr.Row():
            with gr.Column(scale=1):
                provider = gr.Dropdown(
                    choices=["SambaNova", "Groq"],
                    value="SambaNova",
                    label="LLM Provider"
                )
                # Initial choices match the default provider (SambaNova);
                # swapped out by update_models() when the provider changes.
                model = gr.Dropdown(
                    choices=[
                        "Meta-Llama-3.1-70B-Instruct",
                        "Meta-Llama-3.1-405B-Instruct",
                        "Meta-Llama-3.1-8B-Instruct"
                    ],
                    value="Meta-Llama-3.1-70B-Instruct",
                    label="Model"
                )
                prompt_length = gr.Dropdown(
                    choices=["Short", "Medium", "Long"],
                    value="Medium",
                    label="Prompt Length"
                )

        generate_btn = gr.Button("Generate Video Prompt", variant="primary")
        output = gr.Textbox(label="Generated Prompt", lines=12, show_copy_button=True)

        def update_models(provider):
            """Return a Dropdown update listing the models for *provider*."""
            models = {
                "Groq": ["llama-3.3-70b-versatile"],
                "SambaNova": [
                    "Meta-Llama-3.1-70B-Instruct",
                    "Meta-Llama-3.1-405B-Instruct",
                    "Meta-Llama-3.1-8B-Instruct"
                ]
            }
            return gr.Dropdown(choices=models[provider], value=models[provider][0])

        provider.change(update_models, inputs=provider, outputs=model)

        generate_btn.click(
            llm_node.generate_video_prompt,
            inputs=[input_concept, style, camera_style, camera_direction, pacing, special_effects,
                    custom_elements, provider, model, prompt_length],
            outputs=output
        )

    return demo
if __name__ == "__main__":
    # Build the CineGen UI and serve it with a public share link.
    app = create_video_interface()
    app.launch(share=True)