amos1088 committed
Commit c5aaf64 · 1 Parent(s): b4da1bd

test gradio

Files changed (1):
  1. app.py +3 -55

app.py CHANGED
@@ -79,41 +79,6 @@ gif_pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
 gif_pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora")
 
 
-
-@spaces.GPU
-def generate_frames(prompt, reference_image, controlnet_conditioning_scale,num_frames):
-    massage_history = [{"role": "system", "content": """
-You are a scene designer tasked with creating sparse frames of a video. You will be given a prompt describing the desired video, and your goal is to design only the key frames (sparse frames) that represent major changes in the scene. Do not include repetitive or similar scenes—only capture distinct moments.
-
-Expected Format:
-Return the response as a JSON object with the key "frames". The value should be a list of dictionaries, where each dictionary has:
-
-"frame_index": an integer indicating the frame's position in the sequence.
-"description": a brief description of the scene in this frame.
-Example:
-If given a prompt like "A sunset over a beach with waves crashing and a ship sailing by," your response should look like this:
-
-```json
-{
-  "frames": [
-    {"frame_index": 0, "description": "Sunset over an empty beach, sky turning orange and pink"},
-    {"frame_index": 30, "description": "Waves gently crashing on the shore"},
-    {"frame_index": 60, "description": "A ship appears on the horizon, silhouetted by the sunset"},
-    {"frame_index": 90, "description": "Ship sailing closer, with waves becoming more dynamic"},
-    {"frame_index": 120, "description": "Sun dipping below the horizon, casting a golden glow over the water"}
-  ]
-}
-```
-This way, each frame represents a distinct scene, and there’s no redundancy between them."""},
-    {"role": "user", "content": f"give me the frames to generate a video with prompt : `{prompt}`"},]
-    frames = ask_gpt(massage_history,return_str=False)['frames']
-    conditioning_frames = []
-    controlnet_frame_indices =[]
-    for frame in frames:
-        conditioning_frames.append(generate_image(frame['description'], reference_image, controlnet_conditioning_scale))
-        controlnet_frame_indices.append(frame['frame_index'])
-    return conditioning_frames
-
 @spaces.GPU
 def generate_gif(prompt, reference_image, controlnet_conditioning_scale,style_conditioning_scale,num_frames):
     massage_history = [{"role": "system", "content": """
@@ -146,7 +111,7 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
     for frame in frames:
         conditioning_frames.append(generate_image(frame['description'], reference_image, float(style_conditioning_scale)))
         controlnet_frame_indices.append(frame['frame_index'])
-    yield conditioning_frames, "animation.gif"
+    yield (conditioning_frames, "animation.gif")
 
     video = gif_pipe(
         prompt=prompt,
@@ -159,7 +124,7 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
     ).frames[0]
     export_to_gif(video, "output.gif")
 
-    yield conditioning_frames, "animation.gif"
+    yield (conditioning_frames, "animation.gif")
 
 # Set up Gradio interface
 interface = gr.Interface(
@@ -169,7 +134,7 @@ interface = gr.Interface(
         # gr.Image( type= "filepath",label="Reference Image (Style)"),
         gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
         gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
-        gr.Slider(label="Style Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
+        gr.Slider(label="Style Scale", minimum=0, maximum=1.0, step=0.1, value=0.6),
         gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
 
     ],
@@ -179,23 +144,6 @@ interface = gr.Interface(
 
 )
 
-# # Set up Gradio interface
-# interface = gr.Interface(
-#     fn=generate_frames,
-#     inputs=[
-#         gr.Textbox(label="Prompt"),
-#         # gr.Image( type= "filepath",label="Reference Image (Style)"),
-#         gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
-#         gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
-#         gr.Slider(label="Number of frames", minimum=0, maximum=1.0, step=0.1, value=1.0),
-#
-#     ],
-#     outputs="gallery",
-#     title="Image Generation with Stable Diffusion 3 medium and ControlNet",
-#     description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
-#
-# )
-
 interface.launch()
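A note on the two `yield` statements in the diff: `gr.Interface` treats a generator function as a streaming job and re-renders the output components on every `yield`, so the conditioning frames can be shown while `gif_pipe` is still rendering the GIF. A minimal sketch of that mechanism, with hypothetical text components standing in for the app's gallery and GIF outputs:

```python
import time
import gradio as gr

# Minimal sketch, not the app's actual components: gr.Interface accepts a
# generator function, and each `yield` pushes an intermediate update to the
# output components before the function returns.
def stream_outputs(prompt):
    status = f"planned frames for: {prompt}"
    yield status, ""             # first update: plan shown, result still empty
    time.sleep(2)                # stands in for the slow gif_pipe() call
    yield status, "render done"  # second update: final result

demo = gr.Interface(
    fn=stream_outputs,
    inputs=gr.Textbox(label="Prompt"),
    outputs=[gr.Textbox(label="Status"), gr.Textbox(label="Result")],
)

if __name__ == "__main__":
    demo.launch()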
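The system prompt pins the model to a JSON schema, and `ask_gpt(massage_history, return_str=False)` presumably returns the parsed dict. A sketch of that parsing step under this assumption, with a hard-coded `raw` string standing in for the model's reply:

```python
import json

# `raw` stands in for the chat model's reply; the schema matches the example
# in the system prompt: {"frames": [{"frame_index": ..., "description": ...}]}.
raw = """
{
  "frames": [
    {"frame_index": 0, "description": "Sunset over an empty beach"},
    {"frame_index": 30, "description": "Waves gently crashing on the shore"}
  ]
}
"""

plan = json.loads(raw)
controlnet_frame_indices = [f["frame_index"] for f in plan["frames"]]
descriptions = [f["description"] for f in plan["frames"]]
print(controlnet_frame_indices)  # [0, 30]
print(descriptions)
```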
128
 
129
  # Set up Gradio interface
130
  interface = gr.Interface(
 
134
  # gr.Image( type= "filepath",label="Reference Image (Style)"),
135
  gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
136
  gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
137
+ gr.Slider(label="Style Scale", minimum=0, maximum=1.0, step=0.1, value=0.6),
138
  gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
139
 
140
  ],
 
144
 
145
  )
146
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
  interface.launch()
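For context on the `gif_pipe(...)` call: diffusers' `AnimateDiffSparseControlNetPipeline` accepts a sparse set of conditioning images together with the frame indices they pin, which is exactly what the frame plan supplies. A sketch along the lines of the diffusers documentation example; the checkpoint IDs and the conditioning-image path are assumptions, not taken from this Space:

```python
import torch
from diffusers import AnimateDiffSparseControlNetPipeline
from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel
from diffusers.utils import export_to_gif, load_image

# Checkpoints from the diffusers docs example; the Space may use different ones.
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
motion_adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16
)
controlnet = SparseControlNetModel.from_pretrained(
    "guoyww/animatediff-sparsectrl-rgb", torch_dtype=torch.float16
)
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
    model_id,
    motion_adapter=motion_adapter,
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")
pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")

# One conditioning image pinned to frame 0; the sparse ControlNet guides the
# remaining frames toward it. With a frame plan, pass one image per entry and
# the matching "frame_index" values.
image = load_image("conditioning_frame.png")  # hypothetical local file
video = pipe(
    prompt="a sunset over a beach, waves crashing, cinematic",
    num_frames=16,
    conditioning_frames=[image],
    controlnet_frame_indices=[0],
    controlnet_conditioning_scale=1.0,
).frames[0]
export_to_gif(video, "output.gif")
```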