eienmojiki committed on
Commit 7bf72c3 · 1 Parent(s): e6956bc

Refactor image processing functions and add effects


- Cleaned up code formatting
- Added black and white effect
- Implemented sepia tone effect
- Introduced negative image effect
- Added watercolor and posterization effects

Files changed (1)
  1. filters.py +173 -57
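The new effects in this commit are exposed through `@registry.register(...)` decorators. The registry itself is defined elsewhere in the repository and is not part of this diff, so the snippet below is only a hedged sketch of how such a decorator-based registry might look, assuming it is a plain name-to-function mapping; `FilterRegistry` and its method names are illustrative, not the project's actual API.

# Hypothetical sketch -- not part of this commit; names here are assumptions.
from typing import Callable, Dict


class FilterRegistry:
    def __init__(self) -> None:
        # Map a display name (e.g. "Sepia") to the filter function.
        self._filters: Dict[str, Callable] = {}

    def register(self, name: str) -> Callable:
        # Return a decorator that stores the function under the given name.
        def decorator(func: Callable) -> Callable:
            self._filters[name] = func
            return func
        return decorator

    def get(self, name: str) -> Callable:
        # Look up a registered filter by its display name.
        return self._filters[name]


registry = FilterRegistry()

With a registry of roughly this shape, `registry.get("Sepia")(image)` would look up and apply the sepia filter added in this commit.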
filters.py CHANGED
@@ -167,15 +167,15 @@ def warm_filter(image, intensity: int = 30):
     """
     # Convert intensity to actual adjustment values
     intensity_scale = intensity / 100.0
-
+
     # Split the image into BGR channels
     b, g, r = cv2.split(image.astype(np.float32))
-
+
     # Increase red, slightly increase green, decrease blue
     r = np.clip(r * (1 + 0.5 * intensity_scale), 0, 255)
     g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
     b = np.clip(b * (1 - 0.1 * intensity_scale), 0, 255)
-
+
     return cv2.merge([b, g, r]).astype(np.uint8)


@@ -201,15 +201,15 @@ def cool_filter(image, intensity: int = 30):
     """
     # Convert intensity to actual adjustment values
     intensity_scale = intensity / 100.0
-
+
     # Split the image into BGR channels
     b, g, r = cv2.split(image.astype(np.float32))
-
+
     # Increase blue, slightly increase green, decrease red
     b = np.clip(b * (1 + 0.5 * intensity_scale), 0, 255)
     g = np.clip(g * (1 + 0.1 * intensity_scale), 0, 255)
     r = np.clip(r * (1 - 0.1 * intensity_scale), 0, 255)
-
+
     return cv2.merge([b, g, r]).astype(np.uint8)


@@ -235,13 +235,13 @@ def adjust_saturation(image, factor: int = 50):
     """
     # Convert factor to multiplication value (0.0 to 2.0)
     factor = (factor / 50.0)
-
+
     # Convert to HSV
     hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
-
+
     # Adjust saturation
     hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255)
-
+
     # Convert back to BGR
     return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)

@@ -267,23 +267,23 @@ def vintage_filter(image, intensity: int = 50):
     * `numpy.ndarray`: Image with vintage effect
     """
     intensity_scale = intensity / 100.0
-
+
     # Split channels
     b, g, r = cv2.split(image.astype(np.float32))
-
+
     # Adjust colors for vintage look
     r = np.clip(r * (1 + 0.3 * intensity_scale), 0, 255)
     g = np.clip(g * (1 - 0.1 * intensity_scale), 0, 255)
     b = np.clip(b * (1 - 0.2 * intensity_scale), 0, 255)
-
+
     # Create sepia-like effect
     result = cv2.merge([b, g, r]).astype(np.uint8)
-
+
     # Add slight blur for softness
     if intensity > 0:
         blur_amount = int(3 * intensity_scale) * 2 + 1
         result = cv2.GaussianBlur(result, (blur_amount, blur_amount), 0)
-
+
     return result


@@ -308,21 +308,21 @@ def vignette_effect(image, intensity: int = 50):
     * `numpy.ndarray`: Image with vignette effect
     """
     height, width = image.shape[:2]
-
+
     # Create a vignette mask
     X_resultant = np.abs(np.linspace(-1, 1, width)[None, :])
     Y_resultant = np.abs(np.linspace(-1, 1, height)[:, None])
     mask = np.sqrt(X_resultant**2 + Y_resultant**2)
     mask = 1 - np.clip(mask, 0, 1)
-
+
     # Adjust mask based on intensity
     mask = (mask - mask.min()) / (mask.max() - mask.min())
     mask = mask ** (1 + intensity/50)
-
+
     # Apply mask to image
     mask = mask[:, :, None]
     result = image.astype(np.float32) * mask
-
+
     return np.clip(result, 0, 255).astype(np.uint8)


@@ -347,27 +347,30 @@ def hdr_effect(image, strength: int = 50):
     * `numpy.ndarray`: Image with HDR-like effect
     """
     strength_scale = strength / 100.0
-
+
     # Convert to LAB color space
     lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB).astype(np.float32)
-
+
     # Split channels
     l, a, b = cv2.split(lab)
-
+
     # Apply CLAHE to L channel
-    clahe = cv2.createCLAHE(clipLimit=3.0 * strength_scale, tileGridSize=(8, 8))
+    clahe = cv2.createCLAHE(
+        clipLimit=3.0 * strength_scale, tileGridSize=(8, 8))
     l = clahe.apply(l.astype(np.uint8)).astype(np.float32)
-
+
     # Enhance local contrast
     if strength > 0:
         blur = cv2.GaussianBlur(l, (0, 0), 3)
-        detail = cv2.addWeighted(l, 1 + strength_scale, blur, -strength_scale, 0)
-        l = cv2.addWeighted(l, 1 - strength_scale/2, detail, strength_scale/2, 0)
-
+        detail = cv2.addWeighted(
+            l, 1 + strength_scale, blur, -strength_scale, 0)
+        l = cv2.addWeighted(l, 1 - strength_scale/2,
+                            detail, strength_scale/2, 0)
+
     # Merge channels and convert back
     enhanced_lab = cv2.merge([l, a, b])
     result = cv2.cvtColor(enhanced_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)
-
+
     return result


@@ -394,7 +397,7 @@ def gaussian_blur(image, kernel_size: int = 5):
     # Ensure kernel size is odd
     if kernel_size % 2 == 0:
         kernel_size += 1
-
+
    return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)


@@ -419,15 +422,15 @@ def sharpen(image, amount: int = 50):
     * `numpy.ndarray`: Sharpened image
     """
     amount = amount / 100.0
-
+
     # Create the sharpening kernel
-    kernel = np.array([[-1,-1,-1],
-                       [-1, 9,-1],
-                       [-1,-1,-1]])
-
+    kernel = np.array([[-1, -1, -1],
+                       [-1, 9, -1],
+                       [-1, -1, -1]])
+
     # Apply the kernel
     sharpened = cv2.filter2D(image, -1, kernel)
-
+
     # Blend with original image based on amount
     return cv2.addWeighted(image, 1 - amount, sharpened, amount, 0)

@@ -458,43 +461,43 @@ def emboss(image, strength: int = 50, direction: int = 0):
     * `numpy.ndarray`: Embossed image
     """
     strength = strength / 100.0 * 2.0  # Scale to 0-2 range
-
+
     # Define kernels for different directions
     kernels = [
-        np.array([[-1,-1, 0],
+        np.array([[-1, -1, 0],
                   [-1, 1, 1],
-                  [ 0, 1, 1]]),    # 0 - top left to bottom right
+                  [0, 1, 1]]),     # 0 - top left to bottom right
         np.array([[-1, 0, 1],
                   [-1, 1, 1],
                   [-1, 0, 1]]),    # 1 - left to right
-        np.array([[ 0, 1, 1],
+        np.array([[0, 1, 1],
                   [-1, 1, 1],
-                  [-1,-1, 0]]),    # 2 - bottom left to top right
-        np.array([[ 1, 1, 1],
-                  [ 0, 1, 0],
-                  [-1,-1,-1]]),    # 3 - bottom to top
-        np.array([[ 1, 1, 0],
-                  [ 1, 1,-1],
-                  [ 0,-1,-1]]),    # 4 - bottom right to top left
-        np.array([[ 1, 0,-1],
-                  [ 1, 1,-1],
-                  [ 1, 0,-1]]),    # 5 - right to left
-        np.array([[ 0,-1,-1],
-                  [ 1, 1,-1],
-                  [ 1, 1, 0]]),    # 6 - top right to bottom left
-        np.array([[-1,-1,-1],
-                  [ 0, 1, 0],
-                  [ 1, 1, 1]])     # 7 - top to bottom
+                  [-1, -1, 0]]),   # 2 - bottom left to top right
+        np.array([[1, 1, 1],
+                  [0, 1, 0],
+                  [-1, -1, -1]]),  # 3 - bottom to top
+        np.array([[1, 1, 0],
+                  [1, 1, -1],
+                  [0, -1, -1]]),   # 4 - bottom right to top left
+        np.array([[1, 0, -1],
+                  [1, 1, -1],
+                  [1, 0, -1]]),    # 5 - right to left
+        np.array([[0, -1, -1],
+                  [1, 1, -1],
+                  [1, 1, 0]]),     # 6 - top right to bottom left
+        np.array([[-1, -1, -1],
+                  [0, 1, 0],
+                  [1, 1, 1]])      # 7 - top to bottom
     ]
-
+
     # Apply the kernel
     kernel = kernels[direction % 8]
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     embossed = cv2.filter2D(gray, -1, kernel * strength)
-
+
     # Normalize to ensure good contrast
     embossed = cv2.normalize(embossed, None, 0, 255, cv2.NORM_MINMAX)
-
+
     # Convert back to BGR
     return cv2.cvtColor(embossed.astype(np.uint8), cv2.COLOR_GRAY2BGR)

@@ -526,3 +529,116 @@ def oil_painting(image, size: int = 5, dynRatio: int = 1):
     """
     return cv2.xphoto.oilPainting(image, size, dynRatio)

+
+
+@registry.register("Black and White")
+def black_and_white(image):
+    """
+    ## Convert image to classic black and white.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Grayscale image
+    """
+    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+
+@registry.register("Sepia")
+def sepia(image):
+    """
+    ## Apply a warm sepia tone effect.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Sepia toned image
+    """
+    # Convert to RGB
+    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+    # Apply sepia matrix
+    sepia_matrix = np.array([
+        [0.393, 0.769, 0.189],
+        [0.349, 0.686, 0.168],
+        [0.272, 0.534, 0.131]
+    ])
+
+    sepia_image = np.dot(rgb, sepia_matrix.T)
+    sepia_image = np.clip(sepia_image, 0, 255)
+
+    return cv2.cvtColor(sepia_image.astype(np.uint8), cv2.COLOR_RGB2BGR)
+
+
+@registry.register("Negative")
+def negative(image):
+    """
+    ## Invert colors to create a negative effect.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Negative image
+    """
+    return cv2.bitwise_not(image)
+
+
+@registry.register("Watercolor")
+def watercolor(image):
+    """
+    ## Apply a watercolor painting effect.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Watercolor effect image
+    """
+    # Apply bilateral filter to create watercolor effect
+    return cv2.xphoto.oilPainting(image, 7, 1)
+
+
+@registry.register("Posterization")
+def posterize(image):
+    """
+    ## Reduce colors to create a posterization effect.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Posterized image
+    """
+    # Convert to HSV
+    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+
+    # Reduce color levels
+    hsv[:, :, 1] = cv2.equalizeHist(hsv[:, :, 1])
+    hsv[:, :, 2] = cv2.equalizeHist(hsv[:, :, 2])
+
+    # Convert back to BGR
+    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+
+@registry.register("Cross Process")
+def cross_process(image):
+    """
+    ## Apply a film cross-processing effect.
+
+    **Args:**
+        * `image` (numpy.ndarray): Input image (BGR)
+
+    **Returns:**
+        * `numpy.ndarray`: Cross-processed image
+    """
+    # Split channels
+    b, g, r = cv2.split(image.astype(np.float32))
+
+    # Apply cross-process transformation
+    b = np.clip(b * 1.2, 0, 255)
+    g = np.clip(g * 0.8, 0, 255)
+    r = np.clip(r * 1.4, 0, 255)
+
+    return cv2.merge([b, g, r]).astype(np.uint8)
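
Assuming `filters.py` is importable and OpenCV is installed with the contrib modules (`opencv-contrib-python`, which provides the `cv2.xphoto` functions used by the oil-painting and watercolor filters), the newly added effects should be callable as ordinary functions. A rough usage sketch, with placeholder file names:

# Hypothetical usage sketch -- "input.jpg" and the output names are placeholders.
import cv2
from filters import sepia, posterize, cross_process

image = cv2.imread("input.jpg")              # BGR image as loaded by OpenCV
toned = sepia(image)                         # warm sepia tone
stylized = posterize(cross_process(image))   # chain two of the new effects
cv2.imwrite("sepia.jpg", toned)
cv2.imwrite("stylized.jpg", stylized)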