yeq6x committed on
Commit 738b300 · 1 Parent(s): 741ac19
Files changed (1)
  1. app.py +52 -188
app.py CHANGED
@@ -1,13 +1,13 @@
  import spaces
- import numpy as np
  from PIL import Image
- import gradio as gr
- import open3d as o3d
- import trimesh
- from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler
  import torch
- from collections import Counter
- import random
  from controlnet_aux import OpenposeDetector

  ratios_map = {
@@ -31,7 +31,9 @@ ratios_map = {
  }
  ratios = np.array(list(ratios_map.keys()))

  openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
  controlnet = ControlNetModel.from_pretrained(
      "yeq6x/Image2PositionColor_v3",
      torch_dtype=torch.float16
@@ -52,7 +54,8 @@ pipe.scheduler = EulerAncestralDiscreteScheduler(
      num_train_timesteps=1000,
      steps_offset=1
  )
- # pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
  pipe.force_zeros_for_empty_prompt = False

  def get_size(init_image):
@@ -69,6 +72,17 @@ def resize_image(image):
      w,h = get_size(image)
      resized_image = image.resize((w, h))
      return resized_image

  @spaces.GPU
  def generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed):
@@ -81,6 +95,7 @@ def generate_(prompt, negative_prompt, pose_image, input_image, num_steps, contr

  @spaces.GPU
  def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
      # resize input_image to 1024x1024
      input_image = resize_image(input_image)

@@ -89,186 +104,35 @@ def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditio
      images = generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed)

      return [pose_image,images[0]]
-
- # @spaces.GPU
- # def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
- #     print("predict position map")
- #     global pipe
- #     generator = torch.Generator()
- #     generator.manual_seed(random.randint(0, 2147483647))
- #     image = pipe(
- #         prompt,
- #         negative_prompt=negative_prompt,
- #         image = cond_image,
- #         width=1024,
- #         height=1024,
- #         guidance_scale=8,
- #         num_inference_steps=20,
- #         generator=generator,
- #         guess_mode = True,
- #         controlnet_conditioning_scale = controlnet_conditioning_scale
- #     ).images[0]
-
- #     return image
-
-
- # def convert_pil_to_opencv(pil_image):
- #     return np.array(pil_image)
-
- # def inv_func(y,
- #              c = -712.380100,
- #              a = 137.375240,
- #              b = 192.435866):
- #     return (np.exp((y - c) / a) - np.exp(-c/a)) / 964.8468371292845
-
- # def create_point_cloud(img1, img2):
- #     if img1.shape != img2.shape:
- #         raise ValueError("Both images must have the same dimensions.")
-
- #     h, w, _ = img1.shape
- #     points = []
- #     colors = []
- #     for y in range(h):
- #         for x in range(w):
- #             # take the RGB at pixel position (x, y) as XYZ
- #             r, g, b = img1[y, x]
- #             r = inv_func(r) * 0.9
- #             g = inv_func(g) / 1.7 * 0.6
- #             b = inv_func(b)
- #             r *= 150
- #             g *= 150
- #             b *= 150
- #             points.append([g, b, r])  # X, Y, Z
- #             # take the color of image 2 at the corresponding pixel position
- #             colors.append(img2[y, x] / 255.0)  # scale colors to 0-1
-
- #     return np.array(points), np.array(colors)
-
- # def point_cloud_to_glb(points, colors):
- #     # create a point cloud with Open3D
- #     pc = o3d.geometry.PointCloud()
- #     pc.points = o3d.utility.Vector3dVector(points)
- #     pc.colors = o3d.utility.Vector3dVector(colors)
-
- #     # temporarily save it in PLY format
- #     temp_ply_file = "temp_output.ply"
- #     o3d.io.write_point_cloud(temp_ply_file, pc)
-
- #     # convert the PLY to GLB
- #     mesh = trimesh.load(temp_ply_file)
- #     glb_file = "output.glb"
- #     mesh.export(glb_file)
-
- #     return glb_file
-
- # def visualize_3d(image1, image2):
- #     print("Processing...")
- #     # convert the PIL images to OpenCV format
- #     img1 = convert_pil_to_opencv(image1)
- #     img2 = convert_pil_to_opencv(image2)
-
- #     # generate the point cloud
- #     points, colors = create_point_cloud(img1, img2)
-
- #     # convert it to GLB format
- #     glb_file = point_cloud_to_glb(points, colors)
-
- #     return glb_file
-
- # def scale_image(original_image):
- #     aspect_ratio = original_image.width / original_image.height
-
- #     if original_image.width > original_image.height:
- #         new_width = 1024
- #         new_height = round(new_width / aspect_ratio)
- #     else:
- #         new_height = 1024
- #         new_width = round(new_height * aspect_ratio)
-
- #     resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
-
- #     return resized_original
-
- # def get_edge_mode_color(img, edge_width=10):
- #     # grab the 10-pixel border regions
- #     left = img.crop((0, 0, edge_width, img.height))  # left edge
- #     right = img.crop((img.width - edge_width, 0, img.width, img.height))  # right edge
- #     top = img.crop((0, 0, img.width, edge_width))  # top edge
- #     bottom = img.crop((0, img.height - edge_width, img.width, img.height))  # bottom edge
-
- #     # collect and concatenate the pixel data from each region
- #     colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())
-
- #     # compute the mode (most frequent value)
- #     mode_color = Counter(colors).most_common(1)[0][0]  # the most frequently occurring color
-
- #     return mode_color
-
- # def paste_image(resized_img):
- #     # use the mode color of the outer 10 px as the background color
- #     mode_color = get_edge_mode_color(resized_img, edge_width=10)
- #     mode_background = Image.new("RGBA", (1024, 1024), mode_color)
- #     mode_background = mode_background.convert('RGB')
-
- #     x = (1024 - resized_img.width) // 2
- #     y = (1024 - resized_img.height) // 2
- #     mode_background.paste(resized_img, (x, y))
-
- #     return mode_background
-
- # def outpaint_image(image):
- #     if type(image) == type(None):
- #         return None
- #     resized_img = scale_image(image)
- #     image = paste_image(resized_img)
-
- #     return image
-
  block = gr.Blocks().queue()

  with block:
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
-             prompt = gr.Textbox(label="Prompt")
-             negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
-             num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
-             controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
-             seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
-             run_button = gr.Button(value="Run")
-
-         with gr.Column():
-             with gr.Row():
-                 pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
-                 generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
-
-     ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
-     run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
-
-     # gr.Markdown("## Position Map Visualizer")
-
-     # with gr.Row():
-     #     with gr.Column():
-     #         with gr.Row():
-     #             img1 = gr.Image(type="pil", label="color Image", height=300)
-     #             img2 = gr.Image(type="pil", label="map Image", height=300)
-     #         prompt = gr.Textbox("position map, 1girl, white background", label="Prompt")
-     #         negative_prompt = gr.Textbox("lowres, bad anatomy, bad hands, bad feet, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry", label="Negative Prompt")
-     #         controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=0.6, step=0.05)
-     #         predict_map_btn = gr.Button("Predict Position Map")
-     #         visualize_3d_btn = gr.Button("Generate 3D Point Cloud")
-     #     with gr.Column():
-     #         reconstruction_output = gr.Model3D(label="3D Viewer", height=600)
-     #     gr.Examples(
-     #         examples=[
-     #             ["resources/source/000006.png", "resources/target/000006.png"],
-     #             ["resources/source/006420.png", "resources/target/006420.png"],
-     #         ],
-     #         inputs=[img1, img2]
-     #     )
-
-     # img1.input(outpaint_image, inputs=img1, outputs=img1)
-     # predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt, controlnet_conditioning_scale], outputs=img2)
-     # visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)
-
- block.launch(debug = True)
 
  import spaces
+ from diffusers import ControlNetModel
+ from diffusers import StableDiffusionXLControlNetPipeline
+ from diffusers import EulerAncestralDiscreteScheduler
  from PIL import Image
  import torch
+ import numpy as np
+ import cv2
+ import gradio as gr
+ from torchvision import transforms
  from controlnet_aux import OpenposeDetector

  ratios_map = {
 
  }
  ratios = np.array(list(ratios_map.keys()))

+
  openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+
  controlnet = ControlNetModel.from_pretrained(
      "yeq6x/Image2PositionColor_v3",
      torch_dtype=torch.float16
 
      num_train_timesteps=1000,
      steps_offset=1
  )
+ # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
+ # pipe.enable_xformers_memory_efficient_attention()
  pipe.force_zeros_for_empty_prompt = False

  def get_size(init_image):
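Note: the two commented-out lines added in this hunk are optional diffusers toggles, left disabled in the commit. A minimal sketch of what enabling them on the pipe built above would look like (FreeU values taken from the comment; xformers additionally requires the xformers package to be installed):

# Optional toggles, disabled in this commit (sketch only)
pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)   # FreeU re-weights UNet backbone/skip features at inference time
pipe.enable_xformers_memory_efficient_attention()   # memory-efficient attention; needs the xformers package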
 
      w,h = get_size(image)
      resized_image = image.resize((w, h))
      return resized_image
+
+ def resize_image_old(image):
+     image = image.convert('RGB')
+     current_size = image.size
+     if current_size[0] > current_size[1]:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[1], current_size[1]))
+     else:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[0], current_size[0]))
+     resized_image = transforms.functional.resize(center_cropped_image, (1024, 1024))
+     return resized_image
+

  @spaces.GPU
  def generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed):
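A note on the two resize paths now in app.py: the newly added resize_image_old center-crops the input to a square and scales it to 1024x1024, while the existing resize_image keeps the aspect ratio by snapping to the nearest bucket in ratios_map via get_size. The map's entries and the body of get_size fall outside the hunks shown here, so the sketch below is a hypothetical illustration of that lookup (the ratio values are made up), not the committed code:

import numpy as np
from PIL import Image

# Hypothetical entries; the real ratios_map values are not visible in this diff.
ratios_map = {0.5: (724, 1448), 1.0: (1024, 1024), 2.0: (1448, 724)}
ratios = np.array(list(ratios_map.keys()))

def get_size(init_image):
    # Snap the input aspect ratio to the nearest supported bucket and return its (width, height).
    w, h = init_image.size
    closest_ratio = float(ratios[np.argmin(np.abs(ratios - w / h))])
    return ratios_map[closest_ratio]

def resize_image(image):  # same shape as the function shown in the diff
    w, h = get_size(image)
    return image.resize((w, h))

print(resize_image(Image.new("RGB", (800, 600))).size)  # -> (1024, 1024): a 4:3 input snaps to the square bucket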
 

  @spaces.GPU
  def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
+
      # resize input_image to 1024x1024
      input_image = resize_image(input_image)


      images = generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed)

      return [pose_image,images[0]]
+
  block = gr.Blocks().queue()

  with block:
+     gr.Markdown("## BRIA 2.3 ControlNet Pose")
+     gr.HTML('''
+     <p style="margin-bottom: 10px; font-size: 94%">
+     This is a demo for ControlNet Pose that uses the
+     <a href="https://huggingface.co/briaai/BRIA-2.3" target="_blank">BRIA 2.3 text-to-image model</a> as its backbone.
+     Trained on licensed data, BRIA 2.3 provides full legal liability coverage for copyright and privacy infringement.
+     </p>
+     ''')
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
+             prompt = gr.Textbox(label="Prompt")
+             negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
+             num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
+             controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
+             seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
+             run_button = gr.Button(value="Run")
+
+         with gr.Column():
+             with gr.Row():
+                 pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
+                 generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
+
+     ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
+     run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
+
+
+ block.launch(debug = True)
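For readers who want to see the whole flow outside Gradio, here is a minimal end-to-end sketch assembled from the setup shown in this diff and the call pattern of the removed predict_image code. The body of generate_ and the pipeline's from_pretrained line are not part of the hunks above, so the backbone id "briaai/BRIA-2.3" and the exact pipe(...) arguments below are assumptions, not the committed implementation:

import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from controlnet_aux import OpenposeDetector

openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
controlnet = ControlNetModel.from_pretrained("yeq6x/Image2PositionColor_v3", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "briaai/BRIA-2.3",              # assumed backbone, per the demo's description text
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

input_image = Image.open("person.png")   # any RGB photo of a person (hypothetical path)
pose_image = openpose(input_image)       # OpenPose conditioning image, as shown in the "Pose Image" output
result = pipe(
    "a person dancing on the beach",     # prompt
    negative_prompt="Logo,Watermark,Text,Ugly",
    image=pose_image,                    # ControlNet conditioning input
    num_inference_steps=50,
    controlnet_conditioning_scale=1.0,
    generator=torch.Generator().manual_seed(0),
).images[0]
result.save("output.png")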