Spaces:
Running
on
Zero
Running
on
Zero
File size: 4,766 Bytes
6686859 dd8afa8 6686859 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
import os
import random
from groq import Groq
from openai import OpenAI
from gradio_client import Client
class VideoLLMInferenceNode:
    """Generate detailed video-direction prompts via LLM chat-completion APIs.

    Supports three providers — Hugging Face Inference, Groq, and SambaNova —
    each wrapped behind an OpenAI-compatible chat-completions client.
    """

    def __init__(self):
        """Read provider API keys from the environment and build one client each.

        Environment variables used: HUGGINGFACE_TOKEN, GROQ_API_KEY,
        SAMBANOVA_API_KEY. Missing keys are not validated here; the
        corresponding client will simply fail at request time.
        """
        self.huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        self.sambanova_api_key = os.getenv("SAMBANOVA_API_KEY")

        # Hugging Face Inference exposes an OpenAI-compatible endpoint.
        self.huggingface_client = OpenAI(
            base_url="https://api-inference.huggingface.co/v1/",
            api_key=self.huggingface_token,
        )
        self.groq_client = Groq(api_key=self.groq_api_key)
        self.sambanova_client = OpenAI(
            api_key=self.sambanova_api_key,
            base_url="https://api.sambanova.ai/v1",
        )

    def generate_video_prompt(
        self,
        input_concept,
        duration,
        style,
        camera_style,
        pacing,
        special_effects,
        custom_elements,
        provider="Hugging Face",
        model=None
    ):
        """Build a style-specific prompt template and ask an LLM to expand it.

        Args:
            input_concept: Core idea/subject of the video.
            duration: Target length in seconds (interpolated into the template).
            style: Template selector ("cinematic", "documentary", "animation",
                "action", "experimental"); unknown styles fall back to "cinematic".
            camera_style, pacing, special_effects, custom_elements: Free-text
                creative directives interpolated into the template.
            provider: "Hugging Face", "Groq", or "SambaNova".
            model: Optional model id; a per-provider default is used when None.

        Returns:
            The generated prompt text, or an "Error generating video prompt: ..."
            string if anything fails (the method never raises).
        """
        try:
            # Video prompt templates
            prompt_templates = {
                "cinematic": f"""Create a detailed cinematic prompt for a {duration}-second video. Include:
- 3-5 distinct scenes with smooth transitions
- Camera movements: {camera_style}
- Lighting design for {style} style
- Special effects: {special_effects}
- Color grading and film grain details
- Pacing: {pacing}
- Add {custom_elements if custom_elements else 'unique atmospheric elements'}
Format: Timestamped scene descriptions with shot types and transition notes.""",
                "documentary": f"""Develop a documentary-style video prompt for {duration} seconds. Include:
- Interview setup with lighting and background
- B-roll sequences (3-5 locations)
- Archival footage integration
- Text overlay and info-graphics
- Narration style and tone
- {camera_style} camera work
- {pacing} rhythm for topic exploration
- {special_effects} for historical recreations""",
                "animation": f"""Create a {style} animation prompt for {duration} seconds. Specify:
- Animation technique (2D/3D/stop-motion)
- Key action sequences (3-5)
- Character design elements
- Background art style
- Motion blur and frame rate considerations
- Camera zooms/pans for {pacing} pacing
- Special effects: {special_effects}
- {custom_elements if custom_elements else 'unique stylistic flourishes'}""",
                "action": f"""Generate intense action sequence prompt ({duration} seconds). Include:
- 3 escalating action beats
- Camera angles for {style} impact
- Stunt choreography details
- Slow-motion/fast-cut ratios
- Explosion/sfx elements: {special_effects}
- Pacing structure: {pacing}
- {camera_style} camera movements
- Hero shot composition""",
                "experimental": f"""Design avant-garde video prompt ({duration} seconds) with:
- Unconventional narrative structure
- {style} visual treatments
- Abstract transitions between {random.randint(5,8)} concepts
- Experimental sound/image relationships
- {camera_style} capture techniques
- {special_effects} digital manipulations
- Pacing: {pacing} with {custom_elements if custom_elements else 'temporal distortions'}"""
            }

            # Unknown styles fall back to the cinematic template.
            base_prompt = prompt_templates.get(style.lower(), prompt_templates["cinematic"])

            system_message = """You are a professional video director and cinematography expert.
Generate rich, technical video prompts that include:
1. Scene-by-scene breakdowns with timestamps
2. Camera movements and lens specifications
3. Lighting setups and color palettes
4. Transition types and durations
5. Special effects implementation
6. Pacing and rhythm markers
7. Technical specifications when relevant"""

            # Select provider and a per-provider default model.
            if provider == "Hugging Face":
                client = self.huggingface_client
                model = model or "meta-llama/Meta-Llama-3.1-70B-Instruct"
            elif provider == "Groq":
                client = self.groq_client
                model = model or "llama-3.3-70b-versatile"
            elif provider == "SambaNova":
                client = self.sambanova_client
                model = model or "Meta-Llama-3.1-70B-Instruct"
            else:
                # Previously an unknown provider left `client` unbound and the
                # except-branch reported a confusing UnboundLocalError message.
                raise ValueError(f"Unsupported provider: {provider!r}")

            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": f"{base_prompt}\nCore Concept: {input_concept}"}
                ],
                temperature=1.2,          # high temperature for creative variety
                max_tokens=1500,
                top_p=0.95,
                seed=random.randint(0, 10000)  # randomize so repeat calls differ
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # Best-effort API: surface failures as a string instead of raising,
            # so the Gradio UI can display them directly.
            return f"Error generating video prompt: {str(e)}"