kayfahaarukku committed on
Commit
2715a28
·
verified ·
1 Parent(s): 1885732

Revert lagi.

Browse files
Files changed (1) hide show
  1. app.py +7 -32
app.py CHANGED
@@ -48,39 +48,15 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s
48
 
49
  torch.cuda.empty_cache()
50
 
51
- return image, seed, prompt, negative_prompt, guidance_scale, num_inference_steps, resolution
52
 
53
  # Define Gradio interface
54
  def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
55
- image, seed, prompt, negative_prompt, guidance_scale, num_inference_steps, resolution = generate_image(
56
- prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress
57
- )
58
-
59
- generation_details = (
60
- f"{prompt}\n"
61
- f"Negative Prompt: {negative_prompt}\n"
62
- f"Steps: {num_inference_steps}\n"
63
- f"Sampler: Euler a\n"
64
- f"CFG scale: {guidance_scale}\n"
65
- f"Seed: {seed}\n"
66
- f"Size: {resolution}"
67
- )
68
-
69
- return image, seed, gr.update(value=seed), gr.update(visible=True), generation_details
70
 
71
  def reset_inputs():
72
- return (
73
- gr.update(value=''),
74
- gr.update(value=''),
75
- gr.update(value=True),
76
- gr.update(value='832x1216'),
77
- gr.update(value=7),
78
- gr.update(value=28),
79
- gr.update(value=0),
80
- gr.update(value=True),
81
- gr.update(visible=False),
82
- ""
83
- )
84
 
85
  with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
86
  gr.HTML(
@@ -109,7 +85,6 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
109
 
110
  with gr.Column():
111
  output_image = gr.Image(type="pil", label="Generated Image")
112
- generation_details_output = gr.Markdown("", visible=False)
113
  gr.Markdown(
114
  """
115
  ### Recommended prompt formatting:
@@ -128,15 +103,15 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
128
  inputs=[
129
  prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
130
  ],
131
- outputs=[output_image, seed_input, gr.update(value=seed), generation_details_output]
132
  )
133
 
134
  reset_button.click(
135
  reset_inputs,
136
  inputs=[],
137
  outputs=[
138
- prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, generation_details_output, gr.update(value="")
139
  ]
140
  )
141
 
142
- demo.queue(max_size=20).launch(share=False)
 
48
 
49
  torch.cuda.empty_cache()
50
 
51
+ return image, seed
52
 
53
  # Define Gradio interface
54
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
    """Gradio event handler: run image generation and echo the seed back to the UI.

    Returns the generated image, the seed value, and a component update so the
    seed input box displays the seed that was actually used.
    """
    result_image, used_seed = generate_image(
        prompt,
        negative_prompt,
        use_defaults,
        resolution,
        guidance_scale,
        num_inference_steps,
        seed,
        randomize_seed,
        progress,
    )
    # Feed the effective seed back into the seed textbox (matters when randomize_seed is on).
    return result_image, used_seed, gr.update(value=used_seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
def reset_inputs():
    """Restore every UI input control to its default value.

    Order matches the `outputs` list wired to the reset button: prompt,
    negative prompt, use-defaults toggle, resolution, guidance scale,
    inference steps, seed, randomize-seed toggle.
    """
    defaults = ('', '', True, '832x1216', 7, 28, 0, True)
    return tuple(gr.update(value=value) for value in defaults)
 
 
 
 
 
 
 
 
 
 
 
60
 
61
  with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
62
  gr.HTML(
 
85
 
86
  with gr.Column():
87
  output_image = gr.Image(type="pil", label="Generated Image")
 
88
  gr.Markdown(
89
  """
90
  ### Recommended prompt formatting:
 
103
  inputs=[
104
  prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
105
  ],
106
+ outputs=[output_image, seed_input]
107
  )
108
 
109
  reset_button.click(
110
  reset_inputs,
111
  inputs=[],
112
  outputs=[
113
+ prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
114
  ]
115
  )
116
 
117
+ demo.queue(max_size=20).launch(share=False)