Update app.py
app.py CHANGED
@@ -43,7 +43,6 @@ def infer(
     height,
     guidance_scale,
     num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
 ):
     global prev_width, prev_height, pipe
 
@@ -68,11 +67,6 @@ def infer(
         generator=generator,
     ).images[0]
 
-    # # Save image as Base64
-    # buffered = BytesIO()
-    # image.save(buffered, format="PNG")
-    # base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
-    # return image, seed, f"data:image/png;base64,{base64_image}"
     return image, seed
 
 
@@ -102,8 +96,6 @@ with gr.Blocks() as img:
 
         result = gr.Image(label="Result", show_label=False)
 
-        # base64_view = gr.HTML(label="Base64 Image Preview", interactive=True)
-
         with gr.Accordion("Advanced Settings", open=False):
             negative_prompt = gr.Text(
                 label="Negative prompt",
@@ -174,19 +166,8 @@ with gr.Blocks() as img:
             guidance_scale,
             num_inference_steps,
         ],
-        # outputs=[result, seed, base64_view],
        outputs=[result, seed],
     )
 
-    # # JavaScript logic to dynamically update HTML with Base64
-    # js_script = """
-    # <script>
-    # function updateBase64(html_id, base64_src) {
-    #     document.getElementById(html_id).innerHTML = `<img src="${base64_src}" alt="Generated Image"/>`;
-    # }
-    # </script>
-    # """
-    # gr.HTML(js_script)
-
 if __name__ == "__main__":
     img.queue(max_size=10).launch()
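For reference, the commented-out block deleted in the second hunk encoded the generated PIL image as a Base64 data URI, presumably so it could be shown through the (also removed) gr.HTML preview. A minimal, self-contained sketch of that path, with a hypothetical helper name (to_data_uri) that is not part of the app:

# Hypothetical reconstruction of the removed Base64 path; the function name
# and standalone form are assumptions made for illustration only.
import base64
from io import BytesIO

from PIL import Image


def to_data_uri(image: Image.Image) -> str:
    # Serialize the PIL image to PNG in memory, as in the removed comments.
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    # Wrap the bytes as a data URI usable by an <img src="..."> tag.
    base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{base64_image}"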
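The other functional change is dropping the progress=gr.Progress(track_tqdm=True) default from infer(). With that argument present, Gradio mirrors tqdm bars raised inside the call (such as a diffusers denoising loop) into the UI; without it, that per-step progress is no longer surfaced. A generic illustration of the mechanism, not taken from this app:

# Standalone sketch of gr.Progress(track_tqdm=True); names and the dummy
# workload are assumptions, only the progress wiring reflects the removed code.
import time

import gradio as gr
from tqdm import tqdm


def slow_task(steps, progress=gr.Progress(track_tqdm=True)):
    # Any tqdm loop executed inside the handler is reflected as a progress
    # bar in the Gradio frontend because track_tqdm=True.
    for _ in tqdm(range(int(steps)), desc="working"):
        time.sleep(0.05)
    return "done"


with gr.Blocks() as demo:
    steps = gr.Slider(1, 50, value=10, label="Steps")
    out = gr.Textbox(label="Status")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=out)

if __name__ == "__main__":
    demo.queue().launch()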