p2991459 committed
Commit db78959 · 1 Parent(s): 50e7f15

Upload 3 files

Files changed (3)
  1. cutted_full.h5 +3 -0
  2. final.py +234 -0
  3. finalize.h5 +3 -0
cutted_full.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02ce1cdf9f00d2d984231406815842f9335424c154abb0c5f59701a6e185f882
+ size 530371808
final.py ADDED
@@ -0,0 +1,234 @@
+ import gradio as gr
+ from PIL import Image
+ import os
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+
+ W = 512
+ H = 512
+
+ """ Load the edge model """
+ # model_path = os.path.join("/content/drive/MyDrive/colab/", "cutted_full.h5")
+ edge_model = tf.keras.models.load_model("cutted_full.h5")
+
+ """ Load the extraction model """
+ model_path = "finalize.h5"
+ extraction_model = tf.keras.models.load_model(model_path)
+
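+ # Crop the input photo: drop the top 500 and bottom 400 rows, keep only the outermost
+ # 50 columns on each side, then resize to 512x512 and normalize for the edge model.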
+ def read_image(path):
+     # path = path.decode()
+     x = cv2.imread(path, cv2.IMREAD_COLOR)
+     first_5_columns = x[500:-400, :50, :]
+     last_5_columns = x[500:-400, -50:, :]
+     new_image = np.concatenate((first_5_columns, last_5_columns), axis=1)
+     x = cv2.resize(new_image, (H, W))
+     x = x / 255.0
+     x = np.expand_dims(x, axis=0)
+     return x
+
+
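+ # Run the edge model on the cropped strips and return its prediction scaled to uint8.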
+ def edger(image_path, model):
+     image = read_image(image_path)
+     print("completed reading")
+     print(image.shape)
+
+     """ Prediction """
+     pred = model.predict(image, verbose=0)
+
+     n = np.array(pred)
+
+     # Scale to 0-255 and return the second model output for the single input image
+     pr = (n * 255).astype(np.uint8)
+     return pr[1][0]
+
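+ # Resize the predicted edge map to 512x788 and pad 500 blank rows on top and 400 on
+ # the bottom, mirroring the row crop applied in read_image.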
+ def reshaping(original_image):
+     image = cv2.resize(original_image, (512, 788))
+
+     height, width = image.shape[:2]
+
+     top_rows = 500
+     bottom_rows = 400
+
+     empty_row = np.zeros((1, width), dtype=np.uint8)
+
+     for _ in range(top_rows):
+         image = np.vstack((empty_row, image))
+
+     for _ in range(bottom_rows):
+         image = np.vstack((image, empty_row))
+
+     return image
+
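+ # Build a plain light-grey backdrop and draw a blurred curved line (where the wall
+ # meets the floor) whose endpoints come from the predicted edge map.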
+ def background_generator(image_path, model):
+     # Load the original image
+     # original = cv2.imread('original.jpg')
+     # height, width, channel = original.shape
+     height, width = 1688, 3008
+
+     background_color = (240, 240, 240)
+     image = np.full((height, width, 3), background_color, dtype=np.uint8)
+
+     # Edge Game
+     ######################################################################
+
+     original_image = edger(image_path, model)
+
+     original_image = reshaping(original_image)
+
+     # original_image = cv2.resize(original_image, (3008, 1688))
+
+     print(original_image.shape)
+
+     # Find the biggest edge on the left side
+     left_edge = np.max(original_image[:, :5])
+     print(left_edge)
+
+     # Find the biggest edge on the right side
+     right_edge = np.max(original_image[:, -5:])
+     print(right_edge)
+
+     ######################################################################
+
+     # Draw a curved black line resembling where the wall starts
+     line_color = (0, 0, 0)
+     line_thickness = 30
+
+     # Use the positions of the left and right maximum points as control points
+     left_max_row = np.argmax(original_image[:, 5])
+     right_max_row = np.argmax(original_image[:, -5])
+     print(left_max_row)
+     print(right_max_row)
+
+     # Define the curve using control points
+     start_point = (0, left_max_row)
+     end_point = (width, right_max_row)
+     control_point = (width // 2, int(left_max_row - 0.18 * height))
+
+     # Get the height and width of the image
+     height, width = image.shape[:2]
+
+     # Calculate the midpoint
+     midpoint = height // 2
+
+     # Split the image into upper and lower halves
+     upper_half = image[:midpoint, :]
+     lower_half = image[midpoint:, :]
+
+     # Change the color of the lower half
+     lower_half[:] = (240, 240, 240)
+
+     # Draw the straight line in the middle
+     completing_line_start = (1 * width // 10, int(0.49 * height))
+     completing_line_end = (8 * width // 10, int(0.49 * height))
+     completing_line_color = (240, 240, 240)
+     completing_line_thickness = 60
+     cv2.line(image, completing_line_start, completing_line_end, completing_line_color, completing_line_thickness, cv2.LINE_AA)
+
+     # Generate a set of points along the curve using the quadratic Bezier curve formula
+     t = np.linspace(0, 1, 100)
+     curve_points = [(int((1 - x) ** 2 * start_point[0] + 2 * (1 - x) * x * control_point[0] + x ** 2 * end_point[0]),
+                      int((1 - x) ** 2 * start_point[1] + 2 * (1 - x) * x * control_point[1] + x ** 2 * end_point[1]))
+                     for x in t]
+
+     for i in range(1, len(curve_points)):
+         cv2.line(image, curve_points[i - 1], curve_points[i], line_color, line_thickness, cv2.LINE_AA)
+
+     # Soften the drawn line so it blends into the backdrop
+     blur_radius = 9
+     image = cv2.GaussianBlur(image, (blur_radius, blur_radius), 0)
+
+     # cv2.imwrite("results/background.jpg", image)
+     return image
+
+
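+ # Paste the extracted car (RGBA) onto the generated background via alpha compositing.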
+ def merging(car_path, background_path):
+     car = Image.open(car_path)
+     background = Image.open(background_path)
+     car = car.resize(background.size)
+     merged_image = Image.alpha_composite(background.convert('RGBA'), car.convert('RGBA'))
+     return merged_image
+
+ """ Creating a directory """
+ def create_dir(path):
+     if not os.path.exists(path):
+         os.makedirs(path)
+
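+ # Gradio callback: segment the car from the uploaded photo, build a new background,
+ # composite the two, and return the cut-out, the mask, and the merged result.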
+ def prediction(image):
+     """ Directory for storing files """
+     for item in ["joint", "mask", "extracted"]:
+         create_dir(f"results/{item}")
+
+     name = "input_image"
+     image_path = "results/temp.jpg"
+
+     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+     cv2.imwrite(image_path, image)
+     x = cv2.resize(image, (W, H))
+     x = x / 255.0
+     x = np.expand_dims(x, axis=0)
+
+     """ Prediction """
+     pred = extraction_model.predict(x, verbose=0)
+
+     line = np.ones((H, 10, 4)) * 255
+
+     pred_list = []
+     for item in pred:
+         p = item[0] * 255
+         p = np.concatenate([p, p, p, p], axis=-1)
+
+         pred_list.append(p)
+         pred_list.append(line)
+
+     cat_images = np.concatenate(pred_list, axis=1)
+
+     """ Save final mask """
+     image_h, image_w, _ = image.shape
+
+     y0 = pred[0][0]
+     y0 = cv2.resize(y0, (image_w, image_h))
+     y0 = np.expand_dims(y0, axis=-1)
+     ny = np.where(y0 > 0, 1, y0)
+
+     rgb = image[:, :, 0:3]
+     alpha = y0 * 255
+
+     final = np.concatenate((rgb.copy(), alpha), axis=2)
+     yy = cv2.merge((ny.copy(), ny.copy(), ny.copy(), y0.copy()))
+     mask = yy * 255
+
+     line = np.ones((image_h, 10, 4)) * 255
+
+     # cat_images = np.concatenate([mask, line, final], axis=1)
+
+     # Save the final image with alpha channel and the mask image
+     final_image_path = f"results/extracted/{name}.png"
+     mask_image_path = f"results/mask/{name}.png"
+
+     cv2.imwrite(final_image_path, final)
+     cv2.imwrite(mask_image_path, mask)
+
+     # Read both images with IMREAD_UNCHANGED
+     final_image = cv2.imread(final_image_path, cv2.IMREAD_UNCHANGED)
+     mask_image = cv2.imread(mask_image_path, cv2.IMREAD_UNCHANGED)
+
+     # Convert to RGB color space
+     final_image_rgb = cv2.cvtColor(final_image, cv2.COLOR_BGRA2RGBA)
+     # mask_image_rgb = cv2.cvtColor(mask_image, cv2.COLOR_BGR2RGB)
+
+     back = background_generator(image_path, edge_model)
+     cv2.imwrite("background.jpg", back)
+     final = merging(final_image_path, "background.jpg")
+     final.save("results/merged.png")
+
+     complete = cv2.imread("results/merged.png")
+
+     # cv2.imread returns BGR; swap channels so the merged result displays correctly
+     complete = cv2.cvtColor(complete, cv2.COLOR_RGB2BGR)
+
+     return final_image_rgb, mask_image, complete
+
+
+ # Create a Gradio interface with three output components
+ iface = gr.Interface(fn=prediction, inputs="image", outputs=["image", "image", "image"], title="Image Segmentation with New Background")
+ iface.launch()
finalize.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaeda232f5fe6f2f59df7ba6ffd73d198278f38288af1e50ca430f2fe12a777d
+ size 530371464