sxela committed
Commit
866f637
·
1 Parent(s): 3288c2b

add blocks interface

Files changed (1)
  1. app.py +47 -5
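For context, this commit swaps the single `gr.Interface(...)` call for an explicit `gr.Blocks` layout, which lets the app arrange components in rows and columns and wire the button event manually. A minimal sketch of that migration pattern, with a hypothetical `greet` function standing in for the app's real `inference`, assuming Gradio 3.x:

```python
import gradio as gr

def greet(name):
    # hypothetical stand-in for the app's real inference() function
    return f"Hello, {name}!"

# Before: gr.Interface derives the whole layout from input/output specs.
# iface = gr.Interface(greet, inputs="text", outputs="text")
# iface.launch()

# After: gr.Blocks gives explicit control over layout and event wiring.
with gr.Blocks() as demo:
    with gr.Row():
        name = gr.Textbox(label="Name")
        out = gr.Textbox(label="Greeting")
    btn = gr.Button("Run!")
    btn.click(greet, inputs=name, outputs=out)  # bind the click event by hand

demo.launch()
```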
app.py CHANGED
@@ -221,8 +221,50 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, range_scale, init_scale, seed, image_prompts, timestep_respacing, cutn):
     writer.close()
     return img, 'video.mp4'
 
-title = "CLIP Guided Diffusion Faces Model"
-description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'> Comics faces model by <a href='https://linktree/devdef'>Alex Spirin</a>. Based on the original <a href='https://huggingface.co/spaces/EleutherAI/clip-guided-diffusion'>CLIP Guided Diffusion Space</a> by akhaliq / Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings) | <a href='https://github.com/Sxela/DiscoDiffusion-Warp/blob/main/Disco_Diffusion_v5_2_Warp_custom_model.ipynb' target='_blank'>Colab</a></p>"
-iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=45, step=1, default=10, label="skip_timesteps"), gr.inputs.Slider(minimum=0, maximum=3000, step=1, default=600, label="clip guidance scale (Controls how much the image should look like the prompt)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="tv_scale (Controls the smoothness of the final output)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="range_scale (Controls how far out of range RGB values are allowed to be)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="init_scale (This enhances the effect of the init image)"), gr.inputs.Number(default=0, label="Seed"), gr.inputs.Image(type="file", label='image prompt (optional)', optional=True), gr.inputs.Slider(minimum=50, maximum=500, step=1, default=50, label="timestep respacing"),gr.inputs.Slider(minimum=1, maximum=64, step=1, default=32, label="cutn")], outputs=["image","video"], title=title, description=description, article=article, examples=[["Brad Pitt", "face.jpeg", 0, 1000, 150, 50, 0, 0, "face.jpeg", 90, 32]])
-iface.launch()
+demo = gr.Blocks()
+with demo:
+    gr.Markdown(
+        """
+        # CLIP Guided Diffusion Faces Model
+        ### by [Alex Spirin](https://linktr.ee/devdef)
+        Gradio Blocks demo for CLIP Guided Diffusion. To use it, simply add your text and press Run!
+        Based on the original [Space](https://huggingface.co/spaces/EleutherAI/clip-guided-diffusion) by akhaliq.
+        """)
+
+    with gr.Row():
+        text = gr.Textbox(placeholder="Enter a description of a face", label='Text prompt')
+    with gr.Row():
+        with gr.Column():
+            clip_guidance_scale = gr.Slider(minimum=0, maximum=3000, step=1, value=600, label="Prompt strength")
+            tv_scale = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Smoothness")
+            range_scale = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Compress color range")
+
+        with gr.Column():
+            timestep_respacing = gr.Slider(minimum=25, maximum=100, step=1, value=50, label="timestep respacing")
+            cutn = gr.Slider(minimum=4, maximum=32, step=1, value=16, label="cutn")
+            seed = gr.Number(value=0, label="Seed")
+    with gr.Row():
+        with gr.Column():
+            init_image = gr.Image(source="upload", label='initial image (optional)')
+            init_scale = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Look like the image above")
+
+        with gr.Column():
+            image_prompts = gr.Image(source="upload", label='image prompt (optional)')
+            skip_timesteps = gr.Slider(minimum=0, maximum=45, step=1, value=10, label="Look like the image above")
+
+    with gr.Row():
+        run_button = gr.Button("Run!")
+    with gr.Row():
+        gr.Markdown(
+            """
+            # Results
+            """)
+    with gr.Row():
+        output_image = gr.Image(label='Output image', type='numpy')
+        output_video = gr.Video(label='Output video')
+
+    outputs = [output_image, output_video]
+
+    run_button.click(inference, inputs=[text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, range_scale, init_scale, seed, image_prompts, timestep_respacing, cutn], outputs=outputs)
+
+demo.launch(enable_queue=True)
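A note on the wiring above: `run_button.click` passes component values to `inference` positionally, so the `inputs` list must keep the same order as the function's parameters. The `launch(enable_queue=True)` call matches the Gradio version this Space was written for; in later releases the queue is instead enabled on the Blocks object before launch (an assumption, roughly Gradio ≥ 3.14):

```python
demo.queue()   # newer-Gradio equivalent of launch(enable_queue=True) (assumption)
demo.launch()
```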