meg HF staff cakiki committed on
Commit
3911108
·
1 Parent(s): 7df4a81

model -> system (#4)

Browse files

- model -> system (0f6889d13b89642d5672e806a8eee47a8f4e5f51)
- Update app.py (a51c3c95bd6ad454c16c41f88908fad2b7bc5bbe)
- Update app.py (592921df1d6e9a4ce48262df5ed8d1da38aa4ca8)


Co-authored-by: Christopher Akiki <[email protected]>

Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -73,7 +73,7 @@ def show_cluster(cl_id, num_clusters):
73
  model_fig.add_trace(go.Pie(labels=list(dict(cl_dct["labels_model"]).keys()),
74
  values=list(
75
  dict(cl_dct["labels_model"]).values())))
76
- model_description = describe_cluster(dict(cl_dct["labels_model"]), "model")
77
 
78
  gender_fig = go.Figure()
79
  gender_fig.add_trace(
@@ -100,11 +100,11 @@ with gr.Blocks(title=TITLE) as demo:
100
  gr.Markdown(
101
  "## Explore the data generated from [DiffusionBiasExplorer](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer)!")
102
  gr.Markdown(
103
- "### This demo showcases patterns in the images generated from different prompts input to Stable Diffusion and Dalle-2 diffusion models.")
104
  gr.Markdown(
105
  "### Below, see results on how the images from different prompts cluster together.")
106
  gr.HTML(
107
- """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image models and may depict offensive stereotypes or contain explicit content.</span>""")
108
  num_clusters = gr.Radio([12, 24, 48], value=12,
109
  label="How many clusters do you want to make from the data?")
110
 
@@ -120,7 +120,7 @@ with gr.Blocks(title=TITLE) as demo:
120
  a = gr.Text(label="Number of images")
121
  with gr.Row():
122
  with gr.Column(scale=1):
123
- c = gr.Plot(label="How many images from each model?")
124
  c_desc = gr.HTML(label="")
125
  with gr.Column(scale=1):
126
  b = gr.Plot(label="How many gender terms are represented?")
@@ -129,7 +129,7 @@ with gr.Blocks(title=TITLE) as demo:
129
  d = gr.Plot(label="Which ethnicity terms are present?")
130
 
131
  gr.Markdown(
132
- f"The 'Model makeup' plot corresponds to the number of images from the cluster that come from each of the TTI systems that we are comparing: Dall-E 2, Stable Diffusion v.1.4. and Stable Diffusion v.2.")
133
  gr.Markdown(
134
  'The Gender plot shows the number of images based on the input prompts that used the words man, woman, non-binary person, and unmarked, which we label "person".')
135
  gr.Markdown(
 
73
  model_fig.add_trace(go.Pie(labels=list(dict(cl_dct["labels_model"]).keys()),
74
  values=list(
75
  dict(cl_dct["labels_model"]).values())))
76
+ model_description = describe_cluster(dict(cl_dct["labels_model"]), "system")
77
 
78
  gender_fig = go.Figure()
79
  gender_fig.add_trace(
 
100
  gr.Markdown(
101
  "## Explore the data generated from [DiffusionBiasExplorer](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer)!")
102
  gr.Markdown(
103
+ "### This demo showcases patterns in the images generated from different prompts input to Stable Diffusion and Dalle-2 systems.")
104
  gr.Markdown(
105
  "### Below, see results on how the images from different prompts cluster together.")
106
  gr.HTML(
107
+ """<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image systems and may depict offensive stereotypes or contain explicit content.</span>""")
108
  num_clusters = gr.Radio([12, 24, 48], value=12,
109
  label="How many clusters do you want to make from the data?")
110
 
 
120
  a = gr.Text(label="Number of images")
121
  with gr.Row():
122
  with gr.Column(scale=1):
123
+ c = gr.Plot(label="How many images from each system?")
124
  c_desc = gr.HTML(label="")
125
  with gr.Column(scale=1):
126
  b = gr.Plot(label="How many gender terms are represented?")
 
129
  d = gr.Plot(label="Which ethnicity terms are present?")
130
 
131
  gr.Markdown(
132
+ f"The 'System makeup' plot corresponds to the number of images from the cluster that come from each of the TTI systems that we are comparing: Dall-E 2, Stable Diffusion v.1.4. and Stable Diffusion v.2.")
133
  gr.Markdown(
134
  'The Gender plot shows the number of images based on the input prompts that used the words man, woman, non-binary person, and unmarked, which we label "person".')
135
  gr.Markdown(