import os
import random
import shutil
import tempfile
import zipfile

import gradio as gr
import pandas as pd
from PIL import Image
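
# Unpack the bundled archives of pre-generated images
# (one folder per profession for each model).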
with zipfile.ZipFile("images/fair_diffusion.zip", "r") as zip_ref:
    zip_ref.extractall("images/")
with zipfile.ZipFile("images/stable_diffusion.zip", "r") as zip_ref:
    zip_ref.extractall("images/")

def open_stable_ims(profession):
    """Return up to 16 randomly ordered Stable Diffusion generations for a profession."""
    if profession:
        dirname = os.path.join('images/stable_diffusion', profession)
        if os.path.exists(dirname):
            images = [Image.open(os.path.join(dirname, im)).convert("RGB") for im in os.listdir(dirname)]
            random.shuffle(images)
            return images[:16]
    return []


def open_fair_ims(profession):
    """Return up to 16 randomly ordered Fair Diffusion generations for a profession."""
    if profession:
        dirname = os.path.join('images/fair_diffusion', profession)
        if os.path.exists(dirname):
            images = [Image.open(os.path.join(dirname, im)).convert("RGB") for im in os.listdir(dirname)]
            random.shuffle(images)
            return images[:16]
    return []

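
# Dropdown choices: one entry per extracted profession folder; the same
# list drives both galleries.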
professions = sorted(os.listdir('images/fair_diffusion'))

with gr.Blocks() as demo:
    gr.Markdown("# Fair Diffusion Explorer")
    gr.Markdown("#### Choose an occupation below to compare how Stable Diffusion (left) and Fair Diffusion (right) represent different professions.")
    with gr.Row():
        with gr.Column():
            gr.Markdown('## [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) Generations')
            choice1 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
            images1 = gr.Gallery(label="Images")
        with gr.Column():
            gr.Markdown('## Fair Diffusion Generations')
            choice2 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
            images2 = gr.Gallery(label="Images")
    gr.Markdown("We present a novel strategy, called **Fair Diffusion**, to attenuate biases after the deployment of generative text-to-image models. Specifically, we demonstrate shifting a bias, based on human instructions, in any direction, yielding arbitrary new proportions for, e.g., identity groups. As our empirical evaluation demonstrates, this introduced control enables instructing generative image models on fairness, with no data filtering or additional training required. For the full paper by Friedrich et al., see [here](https://arxiv.org/pdf/2302.10893.pdf).")

    # Refresh each gallery whenever its dropdown selection changes.
    choice1.change(open_stable_ims, choice1, [images1])
    choice2.change(open_fair_ims, choice2, [images2])
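
# Start the Gradio server; launch() blocks until the process is stopped.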
demo.launch()