amos1088 committed
Commit 2fd610d · 1 Parent(s): 1103202

test gradio

Files changed (1)
  1. app.py +23 -20
app.py CHANGED
@@ -77,6 +77,7 @@ gif_pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
     torch_dtype=torch.float16,
 ).to(device)
 gif_pipe.load_lora_weights(lora_adapter_id, adapter_name="motion_lora")
+gif_pipe.enable_free_noise(context_length=16, context_stride=4)
 
 
 
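The one functional line this hunk adds turns on FreeNoise for the AnimateDiff pipeline: denoising runs in overlapping windows of context_length frames that slide forward context_stride frames at a time, which keeps clips longer than the motion module's native 16-frame window temporally consistent. A minimal setup sketch for context; the checkpoint IDs are the usual public AnimateDiff/SparseCtrl ones and not necessarily the model_id, controlnet, and lora_adapter_id that app.py defines outside this hunk, and it assumes an installed diffusers version whose SparseControlNet pipeline exposes enable_free_noise:

import torch
from diffusers import AnimateDiffSparseControlNetPipeline
from diffusers.models import MotionAdapter, SparseControlNetModel

# Assumed checkpoint IDs; app.py's own model_id / controlnet / lora_adapter_id
# are defined outside this hunk.
motion_adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16
)
controlnet = SparseControlNetModel.from_pretrained(
    "guoyww/animatediff-sparsectrl-scribble", torch_dtype=torch.float16
)

gif_pipe = AnimateDiffSparseControlNetPipeline.from_pretrained(
    "emilianJR/epiCRealism",          # assumed SD 1.5 base checkpoint
    motion_adapter=motion_adapter,
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
gif_pipe.load_lora_weights("guoyww/animatediff-motion-lora-v1-5-3", adapter_name="motion_lora")

# FreeNoise: denoise in overlapping windows of 16 frames, advancing 4 frames per window,
# so outputs longer than the motion module's training length stay temporally coherent.
gif_pipe.enable_free_noise(context_length=16, context_stride=4)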
@@ -143,12 +144,14 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
     frames = ask_gpt(massage_history,return_str=False)['frames']
     conditioning_frames = []
     controlnet_frame_indices =[]
+    long_prompt = {}
     for frame in frames:
         conditioning_frames.append(generate_image(frame['description'], reference_image, float(controlnet_conditioning_scale)))
         controlnet_frame_indices.append(frame['frame_index'])
+        long_prompt[frame['frame_index']] = frame['description']
 
     video = gif_pipe(
-        prompt=prompt,
+        prompt=long_prompt,
         negative_prompt="low quality, worst quality",
         num_inference_steps=25,
         conditioning_frames=conditioning_frames,
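The new long_prompt dict maps each GPT-planned frame_index to its description and replaces the single prompt string; with FreeNoise enabled, diffusers can take a dict prompt and interpolate the text embeddings between those keyframes, while the same indices feed the sparse ControlNet so each conditioning image lands on the frame it describes. The hunk is cut off after conditioning_frames=, so the call below is a sketch finished with the arguments a typical SparseCtrl invocation takes (controlnet_frame_indices, num_frames, export_to_gif), not the file's actual tail, and generate_image is stubbed because the app's SD3 helper lives elsewhere in app.py:

from diffusers.utils import export_to_gif, load_image

# Hypothetical per-frame plan; app.py gets this from ask_gpt(...)['frames'].
frames = [
    {"frame_index": 0, "description": "a cat sitting on a windowsill at dawn"},
    {"frame_index": 8, "description": "the cat stretching as sunlight fills the room"},
    {"frame_index": 15, "description": "the cat curled up asleep in the sun"},
]
reference_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)

def generate_image(description, reference_image, scale):
    # Stand-in for app.py's SD3 + ControlNet still-image helper.
    return reference_image

conditioning_frames = []
controlnet_frame_indices = []
long_prompt = {}
for frame in frames:
    conditioning_frames.append(generate_image(frame["description"], reference_image, 1.0))
    controlnet_frame_indices.append(frame["frame_index"])
    long_prompt[frame["frame_index"]] = frame["description"]

# gif_pipe is the AnimateDiffSparseControlNetPipeline set up in the sketch above.
video = gif_pipe(
    prompt=long_prompt,                        # dict of frame_index -> prompt, interpolated by FreeNoise
    negative_prompt="low quality, worst quality",
    num_inference_steps=25,
    conditioning_frames=conditioning_frames,
    # Everything below is assumed; the hunk ends at conditioning_frames=.
    controlnet_frame_indices=controlnet_frame_indices,
    controlnet_conditioning_scale=1.0,
    num_frames=16,
).frames[0]
export_to_gif(video, "animation.gif")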
@@ -159,9 +162,26 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
 
     return "animation.gif"
 
+# Set up Gradio interface
+interface = gr.Interface(
+    fn=generate_gif,
+    inputs=[
+        gr.Textbox(label="Prompt"),
+        # gr.Image( type= "filepath",label="Reference Image (Style)"),
+        gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
+        gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
+        gr.Slider(label="Number of frames", minimum=0, maximum=100.0, step=1.0, value=10.0),
+
+    ],
+    outputs="image",
+    title="Image Generation with Stable Diffusion 3 medium and ControlNet",
+    description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
+
+)
+
 # # Set up Gradio interface
 # interface = gr.Interface(
-# fn=generate_gif,
+# fn=generate_frames,
 # inputs=[
 # gr.Textbox(label="Prompt"),
 # # gr.Image( type= "filepath",label="Reference Image (Style)"),
@@ -170,29 +190,12 @@ This way, each frame represents a distinct scene, and there’s no redundancy be
 # gr.Slider(label="Number of frames", minimum=0, maximum=1.0, step=0.1, value=1.0),
 #
 # ],
-# outputs="image",
+# outputs="gallery",
 # title="Image Generation with Stable Diffusion 3 medium and ControlNet",
 # description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
 #
 # )
 
-# Set up Gradio interface
-interface = gr.Interface(
-    fn=generate_frames,
-    inputs=[
-        gr.Textbox(label="Prompt"),
-        # gr.Image( type= "filepath",label="Reference Image (Style)"),
-        gr.File(type="filepath",file_count="multiple",label="Reference Image (Style)"),
-        gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
-        gr.Slider(label="Number of frames", minimum=0, maximum=1.0, step=0.1, value=1.0),
-
-    ],
-    outputs="gallery",
-    title="Image Generation with Stable Diffusion 3 medium and ControlNet",
-    description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet."
-
-)
-
 interface.launch()
 
 
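These two hunks swap which interface is live: the previously active generate_frames/gallery block is removed (its shape survives only in the commented template, which now points at generate_frames with a gallery output), and a generate_gif interface takes over with the frame-count slider widened from a 0–1 to a 0–100 range and its output set to "image", which Gradio fills from the filepath the function returns. A sketch of how the wiring is presumably shaped; the real generate_gif signature is outside this diff, so the parameter names below are assumptions matched positionally to the four inputs:

import gradio as gr

def generate_gif(prompt, reference_images, controlnet_conditioning_scale, num_frames):
    # Placeholder body: the real function in app.py plans frames with GPT, builds
    # conditioning images, runs gif_pipe, and writes the result to animation.gif.
    return "animation.gif"  # filepath handed to the "image" output component

interface = gr.Interface(
    fn=generate_gif,
    inputs=[  # passed to generate_gif positionally, in this order
        gr.Textbox(label="Prompt"),
        gr.File(type="filepath", file_count="multiple", label="Reference Image (Style)"),
        gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
        gr.Slider(label="Number of frames", minimum=0, maximum=100, step=1, value=10),
    ],
    outputs="image",
    title="Image Generation with Stable Diffusion 3 medium and ControlNet",
    description="Generates an image based on a text prompt and a reference image using Stable Diffusion 3 medium with ControlNet.",
)

if __name__ == "__main__":
    interface.launch()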
 