Update app.py
app.py (CHANGED)
@@ -35,6 +35,27 @@ bfl_repo="black-forest-labs/FLUX.1-dev"
 BG_COLOR = (255, 255, 255) # white
 MASK_COLOR = (0, 0, 0) # black
 
+def maskPerson(input):
+    base_options = python.BaseOptions(model_asset_path='selfie_multiclass_256x256.tflite')
+    options = vision.ImageSegmenterOptions(base_options=base_options,
+                                           output_category_mask=True)
+
+    with vision.ImageSegmenter.create_from_options(options) as segmenter:
+        image = mp.Image.create_from_file(input)
+        segmentation_result = segmenter.segment(image)
+        person_mask = segmentation_result.confidence_masks[0]
+
+        image_data = image.numpy_view()
+        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        fg_image[:] = MASK_COLOR
+        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        bg_image[:] = BG_COLOR
+
+        condition = np.stack((person_mask.numpy_view(),) * 3, axis=-1) > 0.2
+        output_image = np.where(condition, fg_image, bg_image)
+
+        return output_image
+
 def maskHead(input):
     base_options = python.BaseOptions(model_asset_path='selfie_multiclass_256x256.tflite')
     options = vision.ImageSegmenterOptions(base_options=base_options,
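A note on the mask convention this new helper feeds into: diffusers inpainting pipelines treat white mask pixels as the region to repaint and black pixels as the region to preserve, so the BG_COLOR/MASK_COLOR split above decides what the later pipe() calls regenerate. The core of both mask helpers is thresholding MediaPipe's per-pixel confidence map into a 3-channel black-and-white image; below is a self-contained sketch of just that step, with a random array standing in for segmentation_result.confidence_masks[0].numpy_view() (the synthetic input and the print are illustration only, not part of this commit):

import numpy as np

BG_COLOR = (255, 255, 255)  # white -> area an inpainting pass may repaint
MASK_COLOR = (0, 0, 0)      # black -> area left untouched

# Synthetic stand-in for the MediaPipe confidence mask: per-pixel
# probability, shape (H, W), float32 in [0, 1].
confidence = np.random.rand(256, 256).astype(np.float32)

fg_image = np.zeros((256, 256, 3), dtype=np.uint8)
fg_image[:] = MASK_COLOR
bg_image = np.zeros((256, 256, 3), dtype=np.uint8)
bg_image[:] = BG_COLOR

# Broadcast the single channel to 3 channels and threshold at 0.2, the same
# cutoff the diff uses: confident pixels take MASK_COLOR, the rest BG_COLOR.
condition = np.stack((confidence,) * 3, axis=-1) > 0.2
output_image = np.where(condition, fg_image, bg_image)
print(output_image.shape, output_image.dtype)  # (256, 256, 3) uint8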
@@ -102,8 +123,6 @@ pipe = FluxInpaintPipeline.from_pretrained(bfl_repo, torch_dtype=torch.bfloat16)
 MAX_SEED = np.iinfo(np.int32).max
 TRIGGER = "a photo of TOK"
 
-print(dir(pipe))
-
 
 @spaces.GPU(duration=100)
 def execute(image, prompt, debug=False):
@@ -118,28 +137,49 @@ def execute(image, prompt, debug=False):
     img = cv2.imread(image)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
 
-    imgs = [ random_positioning(img)
+    imgs = [ random_positioning(img)]
 
     pipe.load_lora_weights("XLabs-AI/flux-RealismLora", weight_name='lora.safetensors')
     response = []
 
-
-
-    generator = torch.Generator().manual_seed(seed_slicer)
+    seed_slicer = random.randint(0, MAX_SEED)
+    generator = torch.Generator().manual_seed(seed_slicer)
 
+    for image in range(len(imgs)):
         current_img = imgs[image]
         cv2.imwrite('base_image.jpg', current_img)
-        cv2.imwrite("mask.jpg", maskHead('base_image.jpg'))
+        cv2.imwrite("mask_person.jpg", maskPerson('base_image.jpg'))
+        #cv2.imwrite("mask.jpg", maskHead('base_image.jpg'))
 
         im = Image.open('base_image.jpg')
         np_arr = np.array(im)
         rgb_image = cv2.cvtColor(np_arr, cv2.COLOR_BGR2RGB)
+
         im = Image.fromarray(rgb_image)
-
+        person = Image.open('mask_person.jpg')
 
         result = pipe(
             prompt=f"{prompt} {TRIGGER}",
             image=im,
+            mask_image=person,
+            width=1024,
+            height=1024,
+            strength=0.85,
+            generator=generator,
+            num_inference_steps=28,
+            max_sequence_length=256,
+            joint_attention_kwargs={"scale": 0.9},
+        ).images[0]
+
+        arr = np.array(result)
+        rgb_image = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
+        cv2.imwrite('person.jpg', rgb_image)
+        cv2.imwrite("mask.jpg", maskHead('person.jpg'))
+        mask = Image.open('mask.jpg')
+
+        result = pipe(
+            prompt=f"{prompt} {TRIGGER}",
+            image=result,
             mask_image=mask,
             width=1024,
             height=1024,
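The rewritten loop draws one random seed per request and binds it to a torch.Generator that the two pipe() passes can share, so a single execute() call stays internally reproducible even though the seed itself is random. Here is a minimal illustration of that pattern, with plain torch.randn standing in for the pipeline (the variable names first/second are illustrative, not from app.py):

import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max

seed_slicer = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed_slicer)

first = torch.randn(4, generator=generator)   # stands in for pass 1
generator.manual_seed(seed_slicer)            # rewind to the same state
second = torch.randn(4, generator=generator)  # stands in for a replayed pass
assert torch.equal(first, second)             # same seed -> same noise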
@@ -152,8 +192,8 @@ def execute(image, prompt, debug=False):
 
     if debug:
         response.append(im)
+        response.append(person)
         response.append(mask)
-        response.append(result)
 
     return response
 
@@ -173,4 +213,4 @@ iface = gr.Interface(
     outputs="gallery"
 )
 
-iface.launch()
+iface.launch(share=True, debug=True)
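On the last hunk: share=True asks Gradio to tunnel a temporary public *.gradio.live URL, and debug=True keeps launch() blocking while printing tracebacks, which helps when running from a notebook; on a Hugging Face Space the share link is redundant since the Space is already served publicly. A minimal sketch of the same launch pattern on a toy interface (the echo function is hypothetical, not from app.py):

import gradio as gr

def echo(text):
    # Trivial stand-in for execute(); returns its input unchanged.
    return text

iface = gr.Interface(fn=echo, inputs="text", outputs="text")
iface.launch(share=True, debug=True)  # public tunnel + blocking debug mode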