Update app.py
app.py
CHANGED
@@ -10,11 +10,11 @@ from io import BytesIO
 from google import genai
 from google.genai import types
 
-#
+# Load environment variables
 from dotenv import load_dotenv
 load_dotenv()
 
-#
+# Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
@@ -24,84 +24,45 @@ def save_binary_file(file_name, data):
 
 def preprocess_prompt(prompt, image1, image2, image3):
     """
-
+    Checks that the input prompt is written in English only,
+    then simply appends "Please generate the image." and returns it.
     """
-    #
-
-
-
-
-
-    if "#1" in prompt:
-        prompt = prompt.replace("#1", "first image" if has_img1 else "first image (none)")
-    if "#2" in prompt:
-        prompt = prompt.replace("#2", "second image" if has_img2 else "second image (none)")
-    if "#3" in prompt:
-        prompt = prompt.replace("#3", "third image" if has_img3 else "third image (none)")
-
-    # Interpret function commands
-    if "1. Change Image" in prompt:
-        desc_match = re.search(r'#1 to "(.*?)"', prompt)
-        if desc_match:
-            description = desc_match.group(1)
-            prompt = f"Please change the first image to {description}. Reinterpret it with a new style and mood while preserving its main content."
-        else:
-            prompt = "Please creatively transform the first image into a more vivid and artistic version."
-
-    elif "2. Remove Text" in prompt:
-        text_match = re.search(r'remove "(.*?)" from #1', prompt)
-        if text_match:
-            text_to_remove = text_match.group(1)
-            prompt = f"Please find and naturally remove the text '{text_to_remove}' from the first image, filling the area with a harmonious background."
-        else:
-            prompt = "Please naturally remove all text from the first image to create a clean look."
-
-    elif "3. Replace Face" in prompt:
-        prompt = "Please seamlessly replace the face in the first image with the face from the second image. Follow the expression and features of the second image while keeping the rest of the first image intact."
-
-    elif "4. Change Outfit" in prompt:
-        if "#3" in prompt or "or #3" in prompt:
-            prompt = "Please seamlessly change the outfit in the first image to the outfit from the second or third image. Follow the style and color of the referenced image while maintaining the body proportions and pose of the first image."
-        else:
-            prompt = "Please seamlessly change the outfit in the first image to the outfit from the second image. Follow the style and color of the second image while maintaining the body proportions and pose of the first image."
-
-    elif "5. Change Background" in prompt:
-        prompt = "Please seamlessly replace the background of the first image with the background from the second image, keeping the main subject intact and harmoniously merging with the new background."
-
-    elif "6. Blend Images (with product)" in prompt:
-        if "#3" in prompt or "or #3" in prompt:
-            prompt = "Please seamlessly blend the first, second, and third images into one image, ensuring that all key elements are included, especially the product."
-        else:
-            prompt = "Please seamlessly blend the first and second images into one image, ensuring that all key elements are included, especially the product."
-
-    elif "7. Apply Style" in prompt:
-        prompt = "Please transform the content of the first image into the style of the second image, preserving its main subject and composition while applying the artistic style, colors, and texture of the second image."
-
-    elif "change to red" in prompt:
-        prompt = "Please change the first image to a red tone. Adjust the overall colors to red hues while maintaining a natural look."
-
-    prompt += " Please generate the image."
+    # Raise an error if the prompt contains Korean characters
+    if re.search(r'[ㄱ-ㅎ가-힣]', prompt):
+        raise ValueError("Error: Prompt must be in English only.")
+
+    # Unnecessary function-related handling removed; just append a simple instruction
+    prompt = prompt.strip() + " Please generate the image."
     return prompt
 
 def generate_with_images(prompt, images):
     """
-
+    Implements the API call as described in the official documentation.
     """
     try:
+        # Check the API key
         api_key = os.environ.get("GEMINI_API_KEY")
         if not api_key:
             return None, "API key is not set. Please check your environment variables."
 
+        # Initialize the Gemini client
         client = genai.Client(api_key=api_key)
-        logger.info(f"Starting Gemini API request - Prompt: {prompt}")
 
+        logger.info(f"Starting Gemini API request - Prompt: {prompt}")
+
+        # Prepare the contents
         contents = []
+
+        # Add the text prompt
         contents.append(prompt)
+
+        # Add the images (text only is sent if no images are provided)
         for idx, img in enumerate(images, 1):
             if img is not None:
                 contents.append(img)
-                logger.info(f"
+                logger.info(f"Image #{idx} added.")
 
+        # Generation settings - set responseModalities as described in the official docs
         response = client.models.generate_content(
            model="gemini-2.0-flash-exp-image-generation",
            contents=contents,
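Note: the generation config that closes this call sits in unchanged lines the diff does not show. Going by the responseModalities comment added above and the google-genai documentation, a minimal, hypothetical sketch of such a call (not the literal code in app.py) could look like this:

    from google import genai
    from google.genai import types

    client = genai.Client(api_key="YOUR_API_KEY")  # placeholder key, for illustration only
    response = client.models.generate_content(
        model="gemini-2.0-flash-exp-image-generation",
        contents=["A watercolor lighthouse at dusk"],  # the text prompt, plus optional PIL images
        config=types.GenerateContentConfig(
            response_modalities=["Text", "Image"],  # request both text and image parts back
        ),
    )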
@@ -114,12 +75,14 @@ def generate_with_images(prompt, images):
             )
         )
 
+        # Create a temporary file
         with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
             temp_path = tmp.name
 
         result_text = ""
         image_found = False
 
+        # Process the response
         for part in response.candidates[0].content.parts:
             if hasattr(part, 'text') and part.text:
                 result_text += part.text
@@ -127,144 +90,113 @@ def generate_with_images(prompt, images):
             elif hasattr(part, 'inline_data') and part.inline_data:
                 save_binary_file(temp_path, part.inline_data.data)
                 image_found = True
-                logger.info("
+                logger.info("Image extracted successfully from response.")
 
         if not image_found:
             return None, f"API did not generate an image. Response text: {result_text}"
 
+        # Return the result image
         result_img = Image.open(temp_path)
         if result_img.mode == "RGBA":
             result_img = result_img.convert("RGB")
 
-        return result_img, f"Image successfully
+        return result_img, f"Image generated successfully. {result_text}"
 
     except Exception as e:
-        logger.exception("Error
+        logger.exception("Error during image generation:")
         return None, f"Error occurred: {str(e)}"
 
 def process_images_with_prompt(image1, image2, image3, prompt):
     """
-
-
-    and retries up to 3 times if generation fails.
+    Processes up to three images together with a prompt.
+    Proceeds with the API call even when no image is provided.
     """
     try:
+        # Check the number of images (proceed even with none)
         images = [image1, image2, image3]
         valid_images = [img for img in images if img is not None]
 
+        # If no prompt is given, auto-generate an English prompt based on the number of uploaded images
         if not prompt or not prompt.strip():
             if len(valid_images) == 0:
-                prompt = "Please
-                logger.info("
+                prompt = "Please generate an image based on the description."
+                logger.info("Auto prompt generated for no image input.")
             elif len(valid_images) == 1:
                 prompt = "Please creatively transform this image into a more vivid and artistic version."
-                logger.info("
+                logger.info("Auto prompt generated for a single image.")
             elif len(valid_images) == 2:
-                prompt = "Please seamlessly blend these two images
-                logger.info("
+                prompt = "Please seamlessly blend these two images, integrating their elements harmoniously into a single image."
+                logger.info("Auto prompt generated for two images.")
             else:
-                prompt = "Please creatively
-                logger.info("
+                prompt = "Please creatively composite these three images, incorporating their key elements into a natural and coherent scene."
+                logger.info("Auto prompt generated for three images.")
         else:
             prompt = preprocess_prompt(prompt, image1, image2, image3)
 
-
-
-        result_img, status = generate_with_images(prompt, valid_images)
-        if result_img is not None:
-            return result_img, status
-        else:
-            logger.info(f"Image generation failed, retrying ({attempt+1}/{max_retries})")
-        return None, f"Failed to generate image. Last status: {status}"
+        # Use the new API call approach
+        return generate_with_images(prompt, valid_images)
 
     except Exception as e:
-        logger.exception("Error
+        logger.exception("Error during image processing:")
         return None, f"Error occurred: {str(e)}"
 
-def update_prompt_from_function(function_choice):
-    """
-    Returns a prompt template based on the selected function.
-    (Custom text input has been removed.)
-    """
-    function_templates = {
-        "1. Change Image": '#1 to "desired description"',
-        "2. Remove Text": 'remove "text to remove" from #1',
-        "3. Replace Face": "replace the face in #1 with the face from #2",
-        "4. Change Outfit": "change the outfit in #1 to that from #2 or #3",
-        "5. Change Background": "change the background of #1 to the background from #2",
-        "6. Blend Images (with product)": "blend #1 with #2 or #3",
-        "7. Apply Style": "apply the style of #2 to #1"
-    }
-
-    return function_templates.get(function_choice, "")
-
-# Gradio Interface
+# Gradio interface (function selection, function application, and custom-text elements removed)
 with gr.Blocks() as demo:
     gr.HTML(
         """
         <div style="text-align: center; margin-bottom: 1rem;">
             <h1>Simple Image Generator</h1>
-            <p>
+            <p>Upload an image (or leave empty) and click generate to create an image based on the English prompt.</p>
         </div>
         """
     )
 
     with gr.Row():
         with gr.Column():
+            # Three image inputs (the app also runs with no image at all)
             with gr.Row():
-                image1_input = gr.Image(type="pil", label="
-                image2_input = gr.Image(type="pil", label="
-                image3_input = gr.Image(type="pil", label="
-
-            function_dropdown = gr.Dropdown(
-                choices=[
-                    "1. Change Image",
-                    "2. Remove Text",
-                    "3. Replace Face",
-                    "4. Change Outfit",
-                    "5. Change Background",
-                    "6. Blend Images (with product)",
-                    "7. Apply Style"
-                ],
-                label="Select Function",
-                value=None
-            )
+                image1_input = gr.Image(type="pil", label="Image 1", image_mode="RGB")
+                image2_input = gr.Image(type="pil", label="Image 2", image_mode="RGB")
+                image3_input = gr.Image(type="pil", label="Image 3", image_mode="RGB")
 
+            # Prompt input (English only)
             prompt_input = gr.Textbox(
                 lines=3,
-                placeholder="Enter
-                label="Prompt (
+                placeholder="Enter the prompt in English.",
+                label="Prompt (Required: English only)"
             )
 
+            # Generate button
             submit_btn = gr.Button("Generate Image", variant="primary")
 
         with gr.Column():
+            # Result outputs
             output_image = gr.Image(label="Generated Image")
             output_text = gr.Textbox(label="Status Message")
             prompt_display = gr.Textbox(label="Used Prompt", visible=True)
 
-    #
-    function_dropdown.change(
-        fn=update_prompt_from_function,
-        inputs=[function_dropdown],
-        outputs=[prompt_input]
-    )
-
+    # Image-generation button click event
     def process_and_show_prompt(image1, image2, image3, prompt):
+        # Check the number of images
         images = [image1, image2, image3]
         valid_images = [img for img in images if img is not None]
 
         try:
+            # If the user entered a prompt, make sure it contains English only
+            if prompt and re.search(r'[ㄱ-ㅎ가-힣]', prompt):
+                return None, "Error: Prompt must be in English only.", prompt
+
+            # If no prompt is given, auto-generate one (in English)
             auto_prompt = prompt
             if not prompt or not prompt.strip():
                 if len(valid_images) == 0:
-                    auto_prompt = "Please
+                    auto_prompt = "Please generate an image based on the description."
                 elif len(valid_images) == 1:
                     auto_prompt = "Please creatively transform this image into a more vivid and artistic version."
                 elif len(valid_images) == 2:
-                    auto_prompt = "Please seamlessly blend these two images
+                    auto_prompt = "Please seamlessly blend these two images, integrating their elements harmoniously into a single image."
                 else:
-                    auto_prompt = "Please creatively
+                    auto_prompt = "Please creatively composite these three images, incorporating their key elements into a natural and coherent scene."
             else:
                 auto_prompt = preprocess_prompt(prompt, image1, image2, image3)
 
@@ -272,7 +204,7 @@ with gr.Blocks() as demo:
 
             return result_img, status, auto_prompt
         except Exception as e:
-            logger.exception("Error
+            logger.exception("Error during processing:")
             return None, f"Error occurred: {str(e)}", prompt
 
     submit_btn.click(
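Note: the arguments of submit_btn.click( are in unchanged lines outside the hunks shown here. A self-contained Gradio sketch of how a four-input, three-output click handler of this shape is typically wired (component names mirror those above; the handler body is a stub, not the app.py implementation):

    import gradio as gr

    # Stand-ins for the handler and components defined in app.py above.
    def process_and_show_prompt(image1, image2, image3, prompt):
        return None, "status message", prompt

    with gr.Blocks() as demo:
        image1_input = gr.Image(type="pil", label="Image 1")
        image2_input = gr.Image(type="pil", label="Image 2")
        image3_input = gr.Image(type="pil", label="Image 3")
        prompt_input = gr.Textbox(label="Prompt")
        submit_btn = gr.Button("Generate Image")
        output_image = gr.Image(label="Generated Image")
        output_text = gr.Textbox(label="Status Message")
        prompt_display = gr.Textbox(label="Used Prompt")

        # Four inputs are passed to the handler; its three return values map onto the outputs.
        submit_btn.click(
            fn=process_and_show_prompt,
            inputs=[image1_input, image2_input, image3_input, prompt_input],
            outputs=[output_image, output_text, prompt_display],
        )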
@@ -283,14 +215,14 @@ with gr.Blocks() as demo:
 
     gr.Markdown(
         """
-        ###
+        ### Instructions:
 
-        1. **
-        2. **
-        3. **Image
-        4. **Automatic Retry:** If image generation fails, it will automatically retry up to 2 additional times.
+        1. **Auto Generation**: You can leave the image upload empty and the system will generate an image based solely on the prompt.
+        2. **Prompt Requirement**: Enter the prompt in English only.
+        3. **Image Reference**: The app supports up to three image inputs.
         """
     )
 
+# Run the application
 if __name__ == "__main__":
     demo.launch(share=True)
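Note: the English-only validation added in this commit matches Hangul Unicode ranges. A standalone illustration of the same pattern (the helper name contains_korean is hypothetical, not part of app.py):

    import re

    def contains_korean(text: str) -> bool:
        # ㄱ-ㅎ covers U+3131..U+314E (consonant jamo), 가-힣 covers U+AC00..U+D7A3 (composed syllables);
        # standalone vowel jamo such as "ㅏ" fall outside these two ranges.
        return re.search(r'[ㄱ-ㅎ가-힣]', text) is not None

    print(contains_korean("Generate a sunset photo"))  # False
    print(contains_korean("노을 사진을 생성해줘"))  # True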