Update app.py
app.py CHANGED
@@ -101,7 +101,7 @@ def encode_image_to_base64(image):
     return base64.b64encode(buffered.getvalue()).decode('utf-8')
 
 
-def generate_image_from_text(encoded_image,pos_prompt=None):
+def generate_image_from_text(encoded_image, seed, pos_prompt=None):
     neg_prompt = '''Detailed, complex textures, intricate patterns, realistic lighting, high contrast, reflections, fuzzy surface, realistic proportions, photographic quality, vibrant colors, detailed background, shadows, disfigured, deformed, ugly, multiple, duplicate.'''
     encoded_str = encode_image_to_base64(encoded_image)
     if pos_prompt:
@@ -115,7 +115,7 @@ def generate_image_from_text(encoded_image,pos_prompt=None):
             },
             'imageGenerationConfig': {
                 "cfgScale": 8,
-                "seed":
+                "seed": seed,
                 "width": 512,
                 "height": 512,
                 "numberOfImages": 1
@@ -131,7 +131,7 @@ def generate_image_from_text(encoded_image,pos_prompt=None):
             },
             'imageGenerationConfig': {
                 "cfgScale": 8,
-                "seed":
+                "seed": seed,
                 "width": 512,
                 "height": 512,
                 "numberOfImages": 1
@@ -180,8 +180,8 @@ def generate(image, mc_resolution, formats=["obj", "glb"]):
 
     return mesh_path_obj.name, mesh_path_glb.name
 
-def run_example(image, do_remove_background, foreground_ratio, mc_resolution, text_prompt=None):
-    image_pil = generate_image_from_text(encoded_image=image, pos_prompt=text_prompt)
+def run_example(image, seed, do_remove_background, foreground_ratio, mc_resolution, text_prompt=None):
+    image_pil = generate_image_from_text(encoded_image=image, seed=seed, pos_prompt=text_prompt)
     preprocessed = preprocess(image_pil, do_remove_background, foreground_ratio)
     mesh_name_obj, mesh_name_glb = generate(preprocessed, 256, ["obj", "glb"])
     return preprocessed, mesh_name_obj, mesh_name_glb
@@ -204,6 +204,7 @@ with gr.Blocks() as demo:
             label="Text Prompt",
            placeholder="Enter Positive Prompt"
         )
+        seed = gr.Textbox(label="Random Seed", value=42)
         processed_image = gr.Image(label="Processed Image", interactive=False, visible=False)
         with gr.Row():
             with gr.Group():
@@ -242,7 +243,7 @@ with gr.Blocks() as demo:
 
     submit.click(fn=check_input_image, inputs=[input_image]).success(
         fn=run_example,
-        inputs=[input_image, do_remove_background, foreground_ratio, mc_resolution, text_prompt],
+        inputs=[input_image, seed, do_remove_background, foreground_ratio, mc_resolution, text_prompt],
         outputs=[processed_image, output_model_obj, output_model_glb],
         # outputs=[output_model_obj, output_model_glb],
     )
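
The two identical imageGenerationConfig hunks suggest the request body is built in two branches (with and without a positive prompt), and this commit threads the new seed argument into both. Below is a minimal sketch of how such a request could be assembled; the boto3 client setup, the amazon.titan-image-generator-v1 model ID, the IMAGE_VARIATION task type, and the response parsing are assumptions not visible in the diff, while the imageGenerationConfig fields mirror it.

# Sketch only: how the new `seed` argument plausibly reaches the Titan request.
# Assumed pieces (not in the diff): client setup, model ID, task type, response parsing.
import base64
import io
import json

import boto3
from PIL import Image


def generate_image_from_text_sketch(image, seed, pos_prompt=None, region_name="us-east-1"):
    """Build a Titan Image Generator request that carries the user-supplied seed."""
    bedrock = boto3.client("bedrock-runtime", region_name=region_name)  # assumed region

    # Mirrors encode_image_to_base64 from the app (line 101 context above).
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    encoded_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

    neg_prompt = "Detailed, complex textures, intricate patterns, ..."  # abbreviated from the app
    body = {
        "taskType": "IMAGE_VARIATION",              # assumption: conditions on the uploaded image
        "imageVariationParams": {
            "images": [encoded_str],
            "text": pos_prompt or "a simple object",
            "negativeText": neg_prompt,
        },
        "imageGenerationConfig": {                  # fields below mirror the diff
            "cfgScale": 8,
            "seed": int(seed),                      # Titan expects an integer seed
            "width": 512,
            "height": 512,
            "numberOfImages": 1,
        },
    }

    response = bedrock.invoke_model(
        modelId="amazon.titan-image-generator-v1",  # assumed model ID
        body=json.dumps(body),
        accept="application/json",
        contentType="application/json",
    )
    payload = json.loads(response["body"].read())
    return Image.open(io.BytesIO(base64.b64decode(payload["images"][0])))

With numberOfImages fixed at 1, re-running with the same seed, image, and prompt should reproduce the same generated image, which is presumably why the seed is now exposed in the UI.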
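
On the Gradio side, the new seed control is a gr.Textbox with value=42, so run_example receives the seed as a string; the imageGenerationConfig needs an integer, so the handler should either cast with int(seed) (as in the sketch above) or use a numeric component. A small sketch of the latter follows; the surrounding components are hypothetical stand-ins, and only the seed/prompt wiring is the point.

# Sketch only: the seed wired as a numeric input instead of a Textbox.
import gradio as gr


def run_example_sketch(image, seed, text_prompt):
    # A real handler would call generate_image_from_text(encoded_image=image,
    # seed=int(seed), pos_prompt=text_prompt) and then run preprocess/generate.
    return image


with gr.Blocks() as demo:
    input_image = gr.Image(label="Input Image", type="pil")
    text_prompt = gr.Textbox(label="Text Prompt", placeholder="Enter Positive Prompt")
    seed = gr.Number(label="Random Seed", value=42, precision=0)  # yields an int, not a str
    submit = gr.Button("Generate")
    processed_image = gr.Image(label="Processed Image", interactive=False)

    submit.click(
        fn=run_example_sketch,
        inputs=[input_image, seed, text_prompt],
        outputs=[processed_image],
    )

if __name__ == "__main__":
    demo.launch()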