# Time-Stream / app.py
import os
import uuid
import gradio as gr
import spaces
from clip_slider_pipeline import CLIPSliderFlux
from diffusers import FluxPipeline, AutoencoderTiny
import torch
import numpy as np
import cv2
from PIL import Image
from diffusers.utils import load_image
from diffusers.utils import export_to_video
import random
from transformers import pipeline
# Load the Korean-to-English translation model (used to translate Korean prompts)
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
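
# A minimal sketch (an assumption, not this app's code) of how the translator
# above is typically applied: detect Hangul in a prompt and translate it to
# English before it reaches the diffusion pipeline. The helper name and the
# Hangul-range check are hypothetical; the app's real handling lives in the
# elided setup section below.
def translate_if_korean(text: str) -> str:
    """Return `text` translated to English if it contains Hangul, else unchanged."""
    if any("\uac00" <= ch <= "\ud7a3" for ch in text):
        return translator(text)[0]["translation_text"]
    return text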
# English menu labels
english_labels = {
    "Prompt": "Prompt",
    "1st direction to steer": "1st Direction",
    "2nd direction to steer": "2nd Direction",
    "Strength": "Strength",
    "Generate directions": "Generate Directions",
    "Generated Images": "Generated Images",
    "From 1st to 2nd direction": "From 1st to 2nd Direction",
    "Strip": "Image Strip",
    "Looping video": "Looping Video",
    "Advanced options": "Advanced Options",
    "Num of intermediate images": "Number of Intermediate Images",
    "Num iterations for clip directions": "Number of CLIP Direction Iterations",
    "Num inference steps": "Number of Inference Steps",
    "Guidance scale": "Guidance Scale",
    "Randomize seed": "Randomize Seed",
    "Seed": "Seed"
}
# [Rest of the imports and pipeline setup remains the same...]
# The elided setup defines the Flux pipeline and the `generate` callback used
# by gr.Examples below. MAX_SEED is also referenced by the seed slider; it is
# restated here so this section runs on its own (np.int32 max is the
# conventional Gradio choice).
MAX_SEED = np.iinfo(np.int32).max
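
# For orientation, a hedged sketch of what that setup usually looks like in
# CLIPSliderFlux demos; the model IDs are assumptions, not taken from this app:
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell",
#                                       torch_dtype=torch.bfloat16)
#   pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1",
#                                              torch_dtype=torch.bfloat16)
#   clip_slider = CLIPSliderFlux(pipe, device)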
# Hide the default Gradio footer
css = """
footer {
    visibility: hidden;
}
"""
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
    x_concept_1 = gr.State("")
    x_concept_2 = gr.State("")
    total_images = gr.Gallery(visible=False)
    avg_diff_x = gr.State()
    recalc_directions = gr.State(False)
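
    # Layout: the left column collects the prompt and the two steering
    # concepts; the right column shows the generated frame together with an
    # interpolation slider between the two directions.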
    with gr.Row():
        with gr.Column():
            with gr.Group():
                prompt = gr.Textbox(label=english_labels["Prompt"],
                                    info="Enter the description",
                                    placeholder="A dog in the park")
                with gr.Row():
                    concept_1 = gr.Textbox(label=english_labels["1st direction to steer"],
                                           info="Initial state",
                                           placeholder="winter")
                    concept_2 = gr.Textbox(label=english_labels["2nd direction to steer"],
                                           info="Final state",
                                           placeholder="summer")
                x = gr.Slider(minimum=0,
                              value=1.75,
                              step=0.1,
                              maximum=4.0,
                              label=english_labels["Strength"],
                              info="Maximum strength for each direction (above 2.5 may be unstable)")
                submit = gr.Button(english_labels["Generate directions"])
        with gr.Column():
            with gr.Group(elem_id="group"):
                post_generation_image = gr.Image(label=english_labels["Generated Images"],
                                                 type="filepath",
                                                 elem_id="interactive")
                post_generation_slider = gr.Slider(minimum=-10,
                                                   maximum=10,
                                                   value=0,
                                                   step=1,
                                                   label=english_labels["From 1st to 2nd direction"])
    with gr.Row():
        with gr.Column(scale=4):
            image_seq = gr.Image(label=english_labels["Strip"],
                                 elem_id="strip",
                                 height=80)
        with gr.Column(scale=2, min_width=100):
            output_image = gr.Video(label=english_labels["Looping video"],
                                    elem_id="video",
                                    loop=True,
                                    autoplay=True)
    with gr.Accordion(label=english_labels["Advanced options"], open=False):
        interm_steps = gr.Slider(label=english_labels["Num of intermediate images"],
                                 minimum=3,
                                 value=7,
                                 maximum=65,
                                 step=2)
        with gr.Row():
            iterations = gr.Slider(label=english_labels["Num iterations for clip directions"],
                                   minimum=0,
                                   value=200,
                                   maximum=400,
                                   step=1)
            steps = gr.Slider(label=english_labels["Num inference steps"],
                              minimum=1,
                              value=3,
                              maximum=4,
                              step=1)
        with gr.Row():
            guidance_scale = gr.Slider(
                label=english_labels["Guidance scale"],
                minimum=0.1,
                maximum=10.0,
                step=0.1,
                value=3.5,
            )
            with gr.Column():
                randomize_seed = gr.Checkbox(True, label=english_labels["Randomize seed"])
                seed = gr.Slider(minimum=0,
                                 maximum=MAX_SEED,
                                 step=1,
                                 label=english_labels["Seed"],
                                 interactive=True,
                                 randomize=True)
    # Examples in English: [prompt, 1st direction, 2nd direction, strength]
    examples = [
        ["flower in mountain", "spring", "winter", 1.5],
        ["man", "baby", "elderly", 2.5],
        ["a tomato", "super fresh", "rotten", 2.5],
    ]
    examples_gradio = gr.Examples(
        examples=examples,
        inputs=[prompt, concept_1, concept_2, x],
        fn=generate,
        outputs=[x_concept_1, x_concept_2, avg_diff_x, output_image, image_seq,
                 total_images, post_generation_image, post_generation_slider, seed],
        cache_examples="lazy",
    )
    # [Rest of the event handlers remain the same...]
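
    # Hedged sketch of the elided wiring, for orientation only. The inputs list
    # is an assumption; the outputs mirror the Examples block above:
    #
    #   submit.click(fn=generate,
    #                inputs=[prompt, concept_1, concept_2, x, ...],
    #                outputs=[x_concept_1, x_concept_2, avg_diff_x, output_image,
    #                         image_seq, total_images, post_generation_image,
    #                         post_generation_slider, seed])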
if __name__ == "__main__":
    demo.launch()