import random

import gradio as gr

from llm_inference_video import VideoLLMInferenceNode

title = """
Generate cinematic video prompts with technical specifications
"""


def create_video_interface():
    """Build the Gradio UI for generating cinematic video prompts.

    Wires concept/duration/style controls plus an LLM provider/model picker
    to ``VideoLLMInferenceNode.generate_video_prompt``.

    Returns:
        gr.Blocks: the assembled demo, ready for ``launch()``.
    """
    llm_node = VideoLLMInferenceNode()

    with gr.Blocks(theme='bethecloud/storj_theme') as demo:
        gr.HTML(title)

        with gr.Row():
            with gr.Column(scale=2):
                input_concept = gr.Textbox(label="Core Concept/Thematic Input", lines=3)
                duration = gr.Slider(15, 600, value=60, label="Duration (seconds)")
                style = gr.Dropdown(
                    choices=["Cinematic", "Documentary", "Animation", "Action", "Experimental"],
                    value="Cinematic",
                    label="Video Style"
                )
                camera_style = gr.Dropdown(
                    choices=["Steadicam flow", "Drone aerials", "Handheld urgency",
                             "Crane elegance", "Dolly precision", "VR 360", "Multi-angle rig"],
                    value="Steadicam flow",
                    label="Camera Movement Style"
                )
            with gr.Column(scale=2):
                pacing = gr.Dropdown(
                    choices=["Slow burn", "Rhythmic pulse", "Frantic energy",
                             "Ebb and flow", "Hypnotic drift"],
                    value="Rhythmic pulse",
                    label="Pacing Rhythm"
                )
                special_effects = gr.Dropdown(
                    choices=["Practical effects", "CGI enhancement", "Analog glitches",
                             "Light painting", "Projection mapping", "Nanosecond exposures"],
                    value="Practical effects",
                    label="SFX Approach"
                )
                # BUG FIX: this textbox was previously defined a second time
                # ("Custom Elements", scale=3) in the row below, rebinding the
                # variable so text typed into THIS widget was never sent to the
                # click handler. Keep the single, descriptive definition.
                custom_elements = gr.Textbox(
                    label="Custom Technical Elements",
                    placeholder="e.g., Infrared hybrid, Datamosh transitions"
                )

        with gr.Row():
            with gr.Column(scale=1):
                provider = gr.Dropdown(
                    choices=["Hugging Face", "Groq", "SambaNova"],
                    value="Hugging Face",
                    label="LLM Provider"
                )
                model = gr.Dropdown(label="Model", value="Qwen/Qwen2.5-72B-Instruct")

        generate_btn = gr.Button("Generate Video Prompt", variant="primary")
        output = gr.Textbox(label="Generated Prompt", lines=12, show_copy_button=True)

        def update_models(provider):
            """Return a refreshed model dropdown for the selected provider."""
            models = {
                "Hugging Face": [
                    "Qwen/Qwen2.5-72B-Instruct",
                    "meta-llama/Meta-Llama-3.1-70B-Instruct",
                    "mistralai/Mixtral-8x7B-Instruct-v0.1",
                    "mistralai/Mistral-7B-Instruct-v0.3"
                ],
                "Groq": ["llama-3.1-70b-versatile"],
                "SambaNova": [
                    "Meta-Llama-3.1-70B-Instruct",
                    "Meta-Llama-3.1-405B-Instruct",
                    "Meta-Llama-3.1-8B-Instruct"
                ]
            }
            # Default to the first model of the newly selected provider.
            return gr.Dropdown(choices=models[provider], value=models[provider][0])

        provider.change(update_models, inputs=provider, outputs=model)
        generate_btn.click(
            llm_node.generate_video_prompt,
            inputs=[input_concept, duration, style, camera_style, pacing,
                    special_effects, custom_elements, provider, model],
            outputs=output
        )

    return demo


if __name__ == "__main__":
    demo = create_video_interface()
    demo.launch(share=True)