import cv2
import numpy as np
from registry import registry
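
# NOTE: `registry.register` is assumed (from its usage below) to be a decorator
# factory: it takes a display name plus optional `defaults`, `min_vals`,
# `max_vals` and `step_vals` dicts describing each tunable parameter, and
# registers the decorated function as a selectable filter. The registry
# implementation itself lives in registry.py and is not shown in this file.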
@registry.register("Original")
def original(image):
return image
@registry.register("Dot Effect", defaults={
"dot_size": 10,
"dot_spacing": 2,
"invert": False,
}, min_vals={
"dot_size": 1,
"dot_spacing": 1,
}, max_vals={
"dot_size": 20,
"dot_spacing": 10,
}, step_vals={
"dot_size": 1,
"dot_spacing": 1,
})
def dot_effect(image, dot_size: int = 10, dot_spacing: int = 2, invert: bool = False):
    """
    ## Convert your image into a dotted pattern.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR or grayscale)
    * `dot_size` (int): Size of each dot
    * `dot_spacing` (int): Spacing between dots
    * `invert` (bool): Invert the dots

    **Returns:**
    * `numpy.ndarray`: Dotted image
    """
    # Convert to grayscale if the image is color
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image

    # Adaptive thresholding boosts local contrast before the dot brightness is sampled
    gray = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        25,  # Block size
        5    # Constant subtracted from the local mean
    )

    height, width = gray.shape
    canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)

    y_dots = range(0, height, dot_size + dot_spacing)
    x_dots = range(0, width, dot_size + dot_spacing)
    dot_color = 255 if not invert else 0

    for y in y_dots:
        for x in x_dots:
            region = gray[y:min(y + dot_size, height), x:min(x + dot_size, width)]
            if region.size > 0:
                brightness = np.mean(region)
                # Dynamic dot sizing based on brightness
                relative_brightness = brightness / 255.0
                if invert:
                    relative_brightness = 1 - relative_brightness
                # Draw a circle with radius proportional to brightness
                radius = int((dot_size / 2) * relative_brightness)
                if radius > 0:
                    cv2.circle(canvas,
                               (x + dot_size // 2, y + dot_size // 2),
                               radius,
                               dot_color,
                               -1)
    return canvas
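
# Example (sketch): with dot_size=10 and dot_spacing=2, dot centres are sampled
# every 12 pixels, and a block with mean brightness 128 gets a radius of
# int((10 / 2) * 128 / 255) = 2 pixels.
#   dotted = dot_effect(cv2.imread("photo.jpg"), dot_size=10, dot_spacing=2)
# ("photo.jpg" is a placeholder path, not part of this project.)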
@registry.register("Pixelize", defaults={
"pixel_size": 10,
}, min_vals={
"pixel_size": 1,
}, max_vals={
"pixel_size": 50,
}, step_vals={
"pixel_size": 1,
})
def pixelize(image, pixel_size: int = 10):
    """
    ## Apply a pixelization effect to the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR or grayscale)
    * `pixel_size` (int): Size of each pixel block

    **Returns:**
    * `numpy.ndarray`: Pixelized image
    """
    height, width = image.shape[:2]

    # Downscale the image; keep at least 1 pixel per side so cv2.resize never
    # receives a zero dimension for very small images or large pixel sizes
    small_height = max(1, height // pixel_size)
    small_width = max(1, width // pixel_size)
    small_image = cv2.resize(
        image, (small_width, small_height), interpolation=cv2.INTER_LINEAR)

    # Resize back to the original size with nearest-neighbor interpolation
    pixelized_image = cv2.resize(
        small_image, (width, height), interpolation=cv2.INTER_NEAREST)
    return pixelized_image
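
# Example (sketch): a 640x480 image with pixel_size=10 is first shrunk to
# 64x48 with linear interpolation, then blown back up to 640x480 with
# nearest-neighbour interpolation, producing solid 10x10 blocks.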
@registry.register("Sketch Effect")
def sketch_effect(image):
"""
## Apply a sketch effect to the image.
**Args:**
* `image` (numpy.ndarray): Input image (BGR or grayscale)
**Returns:**
* `numpy.ndarray`: Sketch effect applied image
"""
# Convert the image to grayscale
if len(image.shape) == 3:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray = image
# Invert the grayscale image
inverted_gray = cv2.bitwise_not(gray)
# Apply Gaussian blur to the inverted image
blurred = cv2.GaussianBlur(inverted_gray, (21, 21), 0) # Fixed kernel size
# Blend the grayscale image with the blurred inverted image
sketch = cv2.divide(gray, 255 - blurred, scale=256)
return sketch
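
# The divide above is a colour-dodge blend: each output pixel is roughly
# gray * 256 / (255 - blurred). In smooth regions the denominator is close to
# the pixel value, so the result saturates to white; around edges the local
# blur differs from the pixel value, leaving dark pencil-like strokes.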
@registry.register("Warm", defaults={
"intensity": 30,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def warm_filter(image, intensity: int = 30):
    """
    ## Adds a warm color effect to the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the warm effect (0-100)

    **Returns:**
    * `numpy.ndarray`: Image with warm color effect
    """
    # Convert intensity to actual adjustment values
    intensity_scale = intensity / 100.0

    # Split the image into BGR channels
    b, g, r = cv2.split(image.astype(np.float32))

    # Increase red, slightly increase green, decrease blue
    r = np.clip(r * (1 + 0.5 * intensity_scale), 0, 255)
    g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
    b = np.clip(b * (1 - 0.1 * intensity_scale), 0, 255)

    return cv2.merge([b, g, r]).astype(np.uint8)
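
# Example (sketch): at intensity=30 the red channel is scaled by 1.15, green by
# 1.03 and blue by 0.97 before clipping; cool_filter below applies the mirror
# of the same scaling (blue up, red down).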
@registry.register("Cool", defaults={
"intensity": 30,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def cool_filter(image, intensity: int = 30):
    """
    ## Adds a cool color effect to the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the cool effect (0-100)

    **Returns:**
    * `numpy.ndarray`: Image with cool color effect
    """
    # Convert intensity to actual adjustment values
    intensity_scale = intensity / 100.0

    # Split the image into BGR channels
    b, g, r = cv2.split(image.astype(np.float32))

    # Increase blue, slightly increase green, decrease red
    b = np.clip(b * (1 + 0.5 * intensity_scale), 0, 255)
    g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
    r = np.clip(r * (1 - 0.1 * intensity_scale), 0, 255)

    return cv2.merge([b, g, r]).astype(np.uint8)
@registry.register("Saturation", defaults={
"factor": 50,
}, min_vals={
"factor": 0,
}, max_vals={
"factor": 100,
}, step_vals={
"factor": 1,
})
def adjust_saturation(image, factor: int = 50):
    """
    ## Adjusts the saturation of the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `factor` (int): Saturation factor (0-100, 50 is normal)

    **Returns:**
    * `numpy.ndarray`: Image with adjusted saturation
    """
    # Convert factor to a multiplication value (0.0 to 2.0)
    factor = factor / 50.0

    # Convert to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)

    # Adjust saturation
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255)

    # Convert back to BGR
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
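
# The slider value maps linearly onto a saturation multiplier: factor=0 gives
# 0.0 (fully desaturated), factor=50 gives 1.0 (unchanged) and factor=100
# gives 2.0 (double saturation, clipped at 255 in HSV space).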
@registry.register("Vintage", defaults={
"intensity": 50,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def vintage_filter(image, intensity: int = 50):
    """
    ## Adds a vintage/retro effect to the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the vintage effect (0-100)

    **Returns:**
    * `numpy.ndarray`: Image with vintage effect
    """
    intensity_scale = intensity / 100.0

    # Split channels
    b, g, r = cv2.split(image.astype(np.float32))

    # Adjust colors for a vintage look
    r = np.clip(r * (1 + 0.3 * intensity_scale), 0, 255)
    g = np.clip(g * (1 - 0.1 * intensity_scale), 0, 255)
    b = np.clip(b * (1 - 0.2 * intensity_scale), 0, 255)

    # Create the sepia-like base image
    result = cv2.merge([b, g, r]).astype(np.uint8)

    # Add a slight blur for softness
    if intensity > 0:
        blur_amount = int(3 * intensity_scale) * 2 + 1
        result = cv2.GaussianBlur(result, (blur_amount, blur_amount), 0)
    return result
@registry.register("Vignette", defaults={
"intensity": 50,
}, min_vals={
"intensity": 0,
}, max_vals={
"intensity": 100,
}, step_vals={
"intensity": 1,
})
def vignette_effect(image, intensity: int = 50):
    """
    ## Adds a vignette effect (darker corners) to the image.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `intensity` (int): Intensity of the vignette (0-100)

    **Returns:**
    * `numpy.ndarray`: Image with vignette effect
    """
    height, width = image.shape[:2]

    # Create a vignette mask
    X_resultant = np.abs(np.linspace(-1, 1, width)[None, :])
    Y_resultant = np.abs(np.linspace(-1, 1, height)[:, None])
    mask = np.sqrt(X_resultant**2 + Y_resultant**2)
    mask = 1 - np.clip(mask, 0, 1)

    # Adjust the mask based on intensity
    mask = (mask - mask.min()) / (mask.max() - mask.min())
    mask = mask ** (1 + intensity / 50)

    # Apply the mask to the image
    mask = mask[:, :, None]
    result = image.astype(np.float32) * mask
    return np.clip(result, 0, 255).astype(np.uint8)
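
# The mask is built from the normalised distance to the image centre:
# sqrt(x^2 + y^2) is 0 at the centre and about 1.4 in the corners, so after
# clipping and inversion the centre keeps full brightness while the corners go
# dark; raising the mask to (1 + intensity/50) steepens the falloff.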
@registry.register("HDR Effect", defaults={
"strength": 50,
}, min_vals={
"strength": 0,
}, max_vals={
"strength": 100,
}, step_vals={
"strength": 1,
})
def hdr_effect(image, strength: int = 50):
    """
    ## Applies an HDR-like effect to enhance image details.

    **Args:**
    * `image` (numpy.ndarray): Input image (BGR)
    * `strength` (int): Strength of the HDR effect (0-100)

    **Returns:**
    * `numpy.ndarray`: Image with HDR-like effect
    """
    strength_scale = strength / 100.0

    # Convert to LAB color space
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB).astype(np.float32)

    # Split channels
    l, a, b = cv2.split(lab)

    # Apply CLAHE to the L channel
    clahe = cv2.createCLAHE(clipLimit=3.0 * strength_scale, tileGridSize=(8, 8))
    l = clahe.apply(l.astype(np.uint8)).astype(np.float32)

    # Enhance local contrast with an unsharp-mask style detail boost
    if strength > 0:
        blur = cv2.GaussianBlur(l, (0, 0), 3)
        detail = cv2.addWeighted(l, 1 + strength_scale, blur, -strength_scale, 0)
        l = cv2.addWeighted(l, 1 - strength_scale / 2, detail, strength_scale / 2, 0)

    # Merge channels and convert back to BGR
    enhanced_lab = cv2.merge([l, a, b])
    result = cv2.cvtColor(enhanced_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
    return result
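

# Minimal smoke test, assuming this module is run directly rather than through
# the Space UI: build a synthetic gradient image and run a few of the filters
# registered above. The output file names are placeholders.
if __name__ == "__main__":
    # Horizontal gradient, combined with flipped/transposed copies to get a BGR image
    gradient = np.tile(np.linspace(0, 255, 256, dtype=np.uint8), (256, 1))
    demo = cv2.merge([gradient, gradient[::-1], gradient.T])

    cv2.imwrite("demo_dot.png", dot_effect(demo, dot_size=8, dot_spacing=2))
    cv2.imwrite("demo_pixel.png", pixelize(demo, pixel_size=16))
    cv2.imwrite("demo_sketch.png", sketch_effect(demo))
    cv2.imwrite("demo_warm.png", warm_filter(demo, intensity=60))
    cv2.imwrite("demo_vignette.png", vignette_effect(demo, intensity=80))
    cv2.imwrite("demo_hdr.png", hdr_effect(demo, strength=70))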