K00B404 committed
Commit 0b9ecea · verified · 1 Parent(s): 1eaec07

Update outpaintprocessor.py

Files changed (1)
  1. outpaintprocessor.py +239 -0
outpaintprocessor.py CHANGED
@@ -0,0 +1,239 @@
import base64
import io
from typing import Dict, Any

from PIL import Image
import numpy as np
import requests


class DynamicImageOutpainter:
    """
    A sophisticated image processing class for iterative outpainting and padding.

    ## Key Features:
    - Dynamic image cropping and centering
    - Iterative outpainting with configurable steps
    - Flexible padding mechanism
    - AI-driven edge generation

    ## Usage Strategy:
    1. Initialize with base image and generation parameters
    2. Apply iterative padding and outpainting
    3. Support multiple AI inference backends
    """

    def __init__(
        self,
        endpoint_url: str,
        api_token: str,
        padding_size: int = 256,
        max_iterations: int = 3
    ):
        """
        Initialize the outpainting processor.

        Args:
            endpoint_url (str): AI inference endpoint URL
            api_token (str): Authentication token for the API
            padding_size (int): Size of the padding added around the cropped image
            max_iterations (int): Maximum number of outpainting iterations
        """
        self.endpoint_url = endpoint_url
        self.api_token = api_token
        self.padding_size = padding_size
        self.max_iterations = max_iterations

        self.headers = {
            "Authorization": f"Bearer {self.api_token}",
            "Content-Type": "application/json",
            "Accept": "image/png"
        }

    def encode_image(self, image: Image.Image) -> str:
        """
        Base64-encode a PIL Image for API transmission.

        Args:
            image (Image.Image): Source image to encode

        Returns:
            str: Base64-encoded PNG image string
        """
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")

    def crop_to_center(self, image: Image.Image) -> Image.Image:
        """
        Crop the image to its center, producing a square aspect ratio.

        Args:
            image (Image.Image): Source image

        Returns:
            Image.Image: Center-cropped square image
        """
        width, height = image.size
        size = min(width, height)
        left = (width - size) // 2
        top = (height - size) // 2
        right = left + size
        bottom = top + size

        return image.crop((left, top, right, bottom))

    def create_padding_mask(self, image: Image.Image) -> Image.Image:
        """
        Generate a mask marking the padding regions to be outpainted.

        Args:
            image (Image.Image): Padded source image

        Returns:
            Image.Image: Grayscale mask; white (255) marks padding regions
        """
        mask = Image.new('L', image.size, 0)
        mask_array = np.array(mask)

        # Set padding regions to white (255)
        mask_array[:self.padding_size, :] = 255   # Top
        mask_array[-self.padding_size:, :] = 255  # Bottom
        mask_array[:, :self.padding_size] = 255   # Left
        mask_array[:, -self.padding_size:] = 255  # Right

        return Image.fromarray(mask_array)

    def pad_image(self, image: Image.Image) -> Image.Image:
        """
        Add transparent padding around the image.

        Args:
            image (Image.Image): Source image

        Returns:
            Image.Image: Padded RGBA image
        """
        padded_size = (
            image.width + 2 * self.padding_size,
            image.height + 2 * self.padding_size
        )
        padded_image = Image.new('RGBA', padded_size, (0, 0, 0, 0))
        padded_image.paste(image, (self.padding_size, self.padding_size))
        return padded_image

    def predict_outpainting(
        self,
        image: Image.Image,
        mask_image: Image.Image,
        prompt: str
    ) -> Image.Image:
        """
        Call the AI inference endpoint to outpaint the masked regions.

        Args:
            image (Image.Image): Base image
            mask_image (Image.Image): Padding mask
            prompt (str): Outpainting generation prompt

        Returns:
            Image.Image: Outpainted result, or the input image if the request fails
        """
        payload = {
            "inputs": prompt,
            "image": self.encode_image(image),
            "mask_image": self.encode_image(mask_image)
        }

        try:
            response = requests.post(
                self.endpoint_url,
                headers=self.headers,
                json=payload
            )
            response.raise_for_status()
            return Image.open(io.BytesIO(response.content))
        except requests.RequestException as e:
            print(f"Outpainting request failed: {e}")
            return image

    def process_iterative_outpainting(
        self,
        initial_image: Image.Image,
        prompt: str
    ) -> Image.Image:
        """
        Execute the iterative outpainting process.

        Args:
            initial_image (Image.Image): Starting image
            prompt (str): Generation prompt

        Returns:
            Image.Image: Final outpainted image
        """
        current_image = self.crop_to_center(initial_image)

        for iteration in range(self.max_iterations):
            # Grow the canvas, then ask the model to fill the new border region
            padded_image = self.pad_image(current_image)
            mask = self.create_padding_mask(padded_image)

            current_image = self.predict_outpainting(
                padded_image, mask, prompt
            )

        return current_image

    def run(
        self,
        image_path: str,
        prompt: str
    ) -> Dict[str, Any]:
        """
        Main processing method for dynamic outpainting.

        Args:
            image_path (str): Path to the input image
            prompt (str): Outpainting generation prompt

        Returns:
            Dict containing processing results
        """
        try:
            initial_image = Image.open(image_path)
            result_image = self.process_iterative_outpainting(
                initial_image, prompt
            )

            # Optional: save the result
            result_path = f"outpainted_result_{id(self)}.png"
            result_image.save(result_path)

            return {
                "status": "success",
                "result_path": result_path,
                "iterations": self.max_iterations
            }

        except Exception as e:
            return {
                "status": "error",
                "message": str(e)
            }


# Usage Example
def main():
    outpainter = DynamicImageOutpainter(
        endpoint_url="https://your-ai-endpoint.com",
        api_token="your_huggingface_token",
        padding_size=256,
        max_iterations=3
    )

    result = outpainter.run(
        image_path="input_image.png",
        prompt="Expand the scene with natural, seamless background"
    )

    print(result)


if __name__ == "__main__":
    main()
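
predict_outpainting assumes a remote inference endpoint that accepts a JSON payload with "inputs", "image", and "mask_image" fields and returns a PNG. For local experimentation, a roughly equivalent step can be sketched with a diffusers inpainting pipeline; the model id, resolution, and the local substitution itself are assumptions for illustration, not part of this commit.

# Hypothetical local stand-in for the remote endpoint called by predict_outpainting.
# Assumes the diffusers library, a CUDA device, and an inpainting checkpoint.
from diffusers import StableDiffusionInpaintPipeline
from PIL import Image
import torch

def outpaint_locally(image: Image.Image, mask: Image.Image, prompt: str) -> Image.Image:
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",  # assumed model id
        torch_dtype=torch.float16,
    ).to("cuda")
    # The pipeline fills the white (255) regions of the mask, matching
    # create_padding_mask's convention of marking the padded border.
    result = pipe(
        prompt=prompt,
        image=image.convert("RGB").resize((512, 512)),
        mask_image=mask.convert("L").resize((512, 512)),
    ).images[0]
    return result

Resizing to 512x512 here only reflects the typical input size of that checkpoint; an endpoint wrapping a different model may handle the padded resolution directly.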