update layout and bug fix for temp file name length
app.py
CHANGED
@@ -17,7 +17,21 @@ pipe.enable_xformers_memory_efficient_attention()
 pipe.unet.to(memory_format=torch.channels_last)
 
 
-
+
+help_text = """
+**Note: Please be advised that a safety checker has been implemented in this public space.
+Any attempts to generate inappropriate or NSFW images will result in the display of a black screen
+as a precautionary measure to protect all users. We appreciate your cooperation in
+maintaining a safe and appropriate environment for all members of our community.**
+
+New features and bug-fixes:
+
+1. Chat style interface
+2. Now use **'reverse'** as prompt to get back the previous image after an unwanted edit
+3. Use **'restart'** as prompt to get back to original image and start over!
+4. Now you can load larger image files (~5 mb) as well
+
+Some notes from the official [instruct-pix2pix](https://huggingface.co/spaces/timbrooks/instruct-pix2pix) Space by the authors and from the official [Diffusers docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/pix2pix) -
 
 If you're not getting what you want, there may be a few reasons:
 1. Is the image not changing enough? Your guidance_scale may be too low. It should be >1. Higher guidance scale encourages to generate images
@@ -27,15 +41,48 @@ be to the input. This pipeline requires a value of at least `1`. It's possible y
 2. Alternatively, you can toggle image_guidance_scale. Image guidance scale is to push the generated image towards the inital image. Image guidance
 scale is enabled by setting `image_guidance_scale > 1`. Higher image guidance scale encourages to generate images that are closely
 linked to the source image `image`, usually at the expense of lower image quality.
-
 3. I have observed that rephrasing the instruction sometimes improves results (e.g., "turn him into a dog" vs. "make him a dog" vs. "as a dog").
-
 4. Increasing the number of steps sometimes improves results.
-
 5. Do faces look weird? The Stable Diffusion autoencoder has a hard time with faces that are small in the image. Try:
 * Cropping the image so the face takes up a larger portion of the frame.
 """
 
+css = """
+#col-container {max-width: 580px; margin-left: auto; margin-right: auto;}
+a {text-decoration-line: underline; font-weight: 600;}
+.footer {
+    margin-bottom: 45px;
+    margin-top: 10px;
+    text-align: center;
+    border-bottom: 1px solid #e5e5e5;
+}
+.footer>p {
+    font-size: .8rem;
+    display: inline-block;
+    padding: 0 10px;
+    transform: translateY(10px);
+    background: white;
+}
+.dark .footer {
+    border-color: #303030;
+}
+.dark .footer>p {
+    background: #0b0f19;
+}
+.animate-spin {
+    animation: spin 1s linear infinite;
+}
+@keyframes spin {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
+"""
+
+
 def previous(image):
     return image
 
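The guidance tips quoted in `help_text` map directly onto the arguments of the instruct-pix2pix pipeline call that `chat()` makes further down in app.py. The snippet below is a standalone sketch of that call, not code from this commit; the model id, input file, and parameter values are illustrative assumptions.

```python
# Illustrative sketch only: how guidance_scale and image_guidance_scale
# feed into the pipeline call used by app.py.
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from PIL import Image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")

image = Image.open("input.png").convert("RGB")  # hypothetical input file
edited = pipe(
    "turn him into a dog",       # rephrasing the instruction can help (tip 3)
    image=image,
    num_inference_steps=20,      # more steps sometimes improves results (tip 4)
    guidance_scale=7.5,          # >1; higher follows the text prompt more closely (tip 1)
    image_guidance_scale=1.5,    # >1; higher stays closer to the input image (tip 2)
).images[0]
edited.save("edited.png")
```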
@@ -69,7 +116,7 @@ def chat(btn_upload, image_in, in_steps, in_guidance_scale, in_img_guidance_scal
     edited_image = pipe(prompt, image=image_hid, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
     if os.path.exists(img_name):
         os.remove(img_name)
-    temp_img_name = img_name[:-4]+str(int(time.time()))+'.png'
+    temp_img_name = img_name[:-4]+str(int(time.time()))[-4:]+'.png'
     # Create a file-like object
     with open(temp_img_name, "wb") as fp:
         # Save the image to the file-like object
@@ -109,7 +156,7 @@ def chat(btn_upload, image_in, in_steps, in_guidance_scale, in_img_guidance_scal
     edited_image = pipe(prompt, image=image_in, num_inference_steps=int(in_steps), guidance_scale=float(in_guidance_scale), image_guidance_scale=float(in_img_guidance_scale)).images[0]
     if os.path.exists(img_name):
         os.remove(img_name)
-    temp_img_name = img_name[:-4]+str(int(time.time()))+'.png'
+    temp_img_name = img_name[:-4]+str(int(time.time()))[-4:]+'.png'
     with open(temp_img_name, "wb") as fp:
         # Save the image to the file-like object
         edited_image.save(fp)
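Both hunks above apply the same fix: instead of appending the full epoch timestamp to the temporary file name, only its last four digits are kept, so the name stays short across repeated edits. A tiny standalone sketch of the scheme (the file name here is hypothetical):

```python
import time

img_name = "uploaded_photo.png"  # hypothetical input name
# Strip the ".png" extension, append only the last four digits of the
# current epoch time, then restore the extension.
temp_img_name = img_name[:-4] + str(int(time.time()))[-4:] + ".png"
print(temp_img_name)             # e.g. "uploaded_photo4821.png"
```

Note that four trailing digits recur every 10,000 seconds, so the suffix bounds the name length rather than guaranteeing uniqueness.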
@@ -123,25 +170,27 @@ def chat(btn_upload, image_in, in_steps, in_guidance_scale, in_img_guidance_scal
 
 
 #Blocks layout
-with gr.Blocks() as demo:
-    gr.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+with gr.Blocks(css="style.css") as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
+            <div
+            style="
+                display: inline-flex;
+                align-items: center;
+                gap: 0.8rem;
+                font-size: 1.75rem;
+            "
+            >
+            <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
+                ChatPix2Pix: Image Editing by Instructions
+            </h1>
+            </div>
+            <p style="margin-bottom: 10px; font-size: 94%">
+            For faster inference without waiting in the queue, you may duplicate the space and upgrade to GPU in settings <a href="https://huggingface.co/spaces/ysharma/InstructPix2Pix_Chatbot?duplicate=true"><img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+            <a href="https://huggingface.co/timbrooks/instruct-pix2pix" target="_blank">Diffusers implementation of instruct-pix2pix</a> - InstructPix2Pix: Learning to Follow Image Editing Instructions!
+            </p>
+            </div>""")
+    #gr.Markdown("""<h1><center>dummy</h1></center> """)
     chatbot = gr.Chatbot()
     state_in = gr.State()
     with gr.Row():
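For orientation, the reworked layout reduces to the pattern below. This is a cut-down sketch under stated assumptions, not the commit's full app.py: the event wiring to `chat()`/`previous()`, the sliders, and the upload button are omitted, and the CSS is passed inline here whereas the commit points `gr.Blocks` at a style.css file.

```python
import gradio as gr

# Minimal sketch of the new Blocks layout (most widgets and all callbacks trimmed).
css = "#col-container {max-width: 580px; margin-left: auto; margin-right: auto;}"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Header card; the real app uses a longer HTML block with links.
        gr.HTML("""<h1 style="text-align: center;">ChatPix2Pix: Image Editing by Instructions</h1>""")
    chatbot = gr.Chatbot()   # chat-style history of edits
    state_in = gr.State()    # carries the latest edited image between turns
    with gr.Row():
        text_in = gr.Textbox(label="Edit instruction")  # hypothetical prompt box

demo.launch()
```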