Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ from PIL import Image, ImageDraw
 from gradio_client import Client, handle_file
 import numpy as np
 import cv2
-
+import os
 
 # Model initialization
 from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
@@ -21,7 +21,6 @@ oneFormer_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/on
 # upscaling_client = InferenceClient(model="stabilityai/stable-diffusion-x4-upscaler")
 # inpainting_client = InferenceClient(model="stabilityai/stable-diffusion-inpainting")
 
-
 # Image processing functions
 def segment_image(image):
     image = Image.fromarray(image)
@@ -62,42 +61,6 @@ def segment_image(image):
 
     return cropped_masks_with_labels
 
-# def merge_segments_by_labels(gallery_images, labels_input):
-#     """
-#     Merges segments from the image gallery into a single image,
-#     based on the labels entered by the user.
-
-#     Args:
-#         gallery_images: List of segment images (tuples of (image, label)).
-#         labels_input: String of labels separated by semicolons.
-
-#     Returns:
-#         List of images where the selected segments are merged into one.
-#     """
-#     labels_to_merge = [label.strip() for label in labels_input.split(";")]
-#     merged_image = None
-#     merged_indices = []
-
-#     for i, (image_path, label) in enumerate(gallery_images):
-#         if label in labels_to_merge:
-#             image = cv2.imread(image_path)
-#             if merged_image is None:
-#                 merged_image = image.copy()
-#             else:
-#                 merged_image = cv2.add(merged_image, image)
-#             merged_indices.append(i)
-#     if merged_image is not None:
-#         new_gallery_images = [
-#             item for i, item in enumerate(gallery_images) if i not in merged_indices
-#         ]
-
-#         new_name = labels_to_merge[0]
-#         new_gallery_images.append((merged_image, new_name))
-
-#         return new_gallery_images
-#     else:
-#         return gallery_images
-
 
 def merge_segments_by_labels(gallery_images, labels_input):
     labels_to_merge = [label.strip() for label in labels_input.split(";")]
@@ -107,19 +70,19 @@ def merge_segments_by_labels(gallery_images, labels_input):
     for i, (image_path, label) in enumerate(gallery_images):  # Fixed: image_path
         if label in labels_to_merge:
             # Load the image with PIL, preserving the alpha channel
-            image = Image.open(image_path).convert("RGBA")
+            image = Image.open(image_path).convert("RGBA")
 
             if merged_image is None:
                 merged_image = image.copy()
             else:
                 # Composite the images, respecting the alpha channel
-                merged_image = Image.alpha_composite(merged_image, image)
+                merged_image = Image.alpha_composite(merged_image, image)
             merged_indices.append(i)
-
+
     if merged_image is not None:
         # Convert the merged image to a numpy array
-        merged_image_np = np.array(merged_image)
-
+        merged_image_np = np.array(merged_image)
+
         new_gallery_images = [
             item for i, item in enumerate(gallery_images) if i not in merged_indices
         ]
@@ -130,42 +93,26 @@ def merge_segments_by_labels(gallery_images, labels_input):
         return gallery_images
 
 
-# def set_client_for_session(request: gr.Request):
-#     x_ip_token = request.headers['x-ip-token']
-#     return Client("JeffreyXiang/TRELLIS", headers={"X-IP-Token": x_ip_token})
-
 def set_hunyuan_client(request: gr.Request):
     try:
         x_ip_token = request.headers['x-ip-token']
-
+        client = Client("tencent/Hunyuan3D-2", headers={"X-IP-Token": x_ip_token})
+        print(x_ip_token, "tencent/Hunyuan3D-2 Ip token")
+        return client
     except:
+        print("tencent/Hunyuan3D-2 no token")
         return Client("tencent/Hunyuan3D-2")
-
+
 def set_vFusion_client(request: gr.Request):
     try:
         x_ip_token = request.headers['x-ip-token']
-
+        client = Client("facebook/VFusion3D", headers={"X-IP-Token": x_ip_token})
+        print(x_ip_token, "facebook/VFusion3D Ip token")
+        return client
     except:
+        print("facebook/VFusion3D no token")
        return Client("facebook/VFusion3D")
 
-# def generate_3d_model(client, segment_output, segment_name):
-#     for i, (image_path, label) in enumerate(segment_output):
-#         if label == segment_name:
-#             result = client.predict(
-#                 image=handle_file(image_path),
-#                 multiimages=[],
-#                 seed=0,
-#                 ss_guidance_strength=7.5,
-#                 ss_sampling_steps=12,
-#                 slat_guidance_strength=3,
-#                 slat_sampling_steps=12,
-#                 multiimage_algo="stochastic",
-#                 api_name="/image_to_3d"
-#             )
-#             break
-#     print(result)
-#     return result["video"]
-
 def generate_3d_model(client, segment_output, segment_name):
     for i, (image_path, label) in enumerate(segment_output):
         if label == segment_name:
@@ -180,7 +127,7 @@ def generate_3d_model(client, segment_output, segment_name):
                 api_name="/shape_generation"
             )
             print(result)
-            return
+            return result[0]
 
 def generate_3d_model_texture(client, segment_output, segment_name):
     for i, (image_path, label) in enumerate(segment_output):
@@ -196,7 +143,7 @@ def generate_3d_model_texture(client, segment_output, segment_name):
                 api_name="/generation_all"
             )
             print(result)
-            return
+            return result[1]
 
 def generate_3d_model2(client, segment_output, segment_name):
     for i, (image_path, label) in enumerate(segment_output):
@@ -206,7 +153,7 @@ def generate_3d_model2(client, segment_output, segment_name):
                 api_name="/step_1_generate_obj"
            )
            print(result)
-            return
+            return result[0]
 
 
 # def classify_segments(segments):
@@ -225,8 +172,6 @@ def generate_3d_model2(client, segment_output, segment_name):
 #     return inpainted
 
 
-from gradio_litmodel3d import LitModel3D
-
 with gr.Blocks() as demo:
     hunyuan_client = gr.State()
     vFusion_client = gr.State()
@@ -264,11 +209,10 @@ with gr.Blocks() as demo:
     # interactive=True # this allow users to interact with the model
     # )
     trellis_output = gr.Model3D(label="3D Model")
-    trellis_output2 = gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model Wireframe")
     # trellis_button.click(generate_3d_model, inputs=[client, segment_output, trellis_input], outputs=trellis_output)
-    hunyuan_button.click(generate_3d_model, inputs=[hunyuan_client, segment_output, trellis_input], outputs=
-    hunyuan_button_texture.click(generate_3d_model_texture, inputs=[hunyuan_client, segment_output, trellis_input], outputs=
-    vFusion_button.click(generate_3d_model2, inputs=[vFusion_client, segment_output, trellis_input], outputs=
+    hunyuan_button.click(generate_3d_model, inputs=[hunyuan_client, segment_output, trellis_input], outputs=trellis_output)
+    hunyuan_button_texture.click(generate_3d_model_texture, inputs=[hunyuan_client, segment_output, trellis_input], outputs=trellis_output)
+    vFusion_button.click(generate_3d_model2, inputs=[vFusion_client, segment_output, trellis_input], outputs=trellis_output)
 
     segment_button.click(segment_image, inputs=image_input, outputs=segment_output)
     # segment_button.click(segment_full_image, inputs=image_input, outputs=segment_output)
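Note on the merge rewrite: the new merge_segments_by_labels swaps cv2.add for Image.alpha_composite, which draws each segment over the accumulated result using its per-pixel alpha instead of saturating channel sums. Image.alpha_composite requires both inputs to be RGBA and the same size, which is why each segment is loaded with .convert("RGBA"). A minimal self-contained sketch of the operation (sizes and colors here are illustrative only):

from PIL import Image

# Two same-sized RGBA layers; alpha_composite blends `layer` over `base`
# by alpha, rather than adding channel values the way cv2.add does.
base = Image.new("RGBA", (64, 64), (255, 0, 0, 128))   # translucent red
layer = Image.new("RGBA", (64, 64), (0, 0, 255, 128))  # translucent blue
merged = Image.alpha_composite(base, layer)
print(merged.mode, merged.size)  # -> RGBA (64, 64)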
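Note on the client setters: set_hunyuan_client and set_vFusion_client now forward the visitor's ZeroGPU x-ip-token request header to the downstream Space, so GPU quota is charged to the visitor rather than to the hosting Space; the bare except: falls back to an anonymous client when the header is absent (e.g., local runs). A sketch of the same pattern factored into one helper; the make_client name and the .get lookup are illustrative, not part of this commit:

import gradio as gr
from gradio_client import Client

def make_client(space_id: str, request: gr.Request) -> Client:
    # Forward the visitor's ZeroGPU token when present so downstream GPU
    # quota is billed to the visitor, not to this Space.
    # request.headers is assumed dict-like here (starlette Headers);
    # the commit indexes it directly inside try/except instead.
    token = request.headers.get("x-ip-token")
    if token:
        return Client(space_id, headers={"X-IP-Token": token})
    return Client(space_id)  # anonymous fallback (local run, no token)

An explicit .get also narrows the error handling: the commit's bare except: would mask failures raised inside Client(...) itself, not just a missing header.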
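Note on the changed returns: the generate_3d_model* functions previously returned None, so the Model3D output never updated; they now return an element of client.predict(...), which gradio_client gives back as a tuple when the remote endpoint declares several outputs. The indices encode an assumption about output order on the remote Spaces (e.g., that /generation_all yields the textured mesh second). A hedged sketch of the consuming pattern; the endpoint's parameter name and output layout are assumed, not confirmed:

from gradio_client import Client, handle_file

client = Client("tencent/Hunyuan3D-2")
# Assumed: the endpoint takes an input image and returns multiple outputs,
# e.g. (shape_mesh_path, textured_mesh_path); adjust the index to the real API.
result = client.predict(image=handle_file("segment.png"), api_name="/generation_all")
textured_mesh_path = result[1]  # second output, as indexed in the diff above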