LPX55 committed
Commit 4380e0f · 1 Parent(s): eff3634

add: jpeg compression estimator

Files changed:
- app.py +1 -0
- forensics/__init__.py +4 -2
- forensics/jpeg_compression.py +157 -0
app.py CHANGED

@@ -26,6 +26,7 @@ from forensics.minmax import minmax_process
 from forensics.ela import ELA
 from forensics.wavelet import noise_estimation
 from forensics.bitplane import bit_plane_extractor
+from forensics.jpeg_compression import estimate_qf
 from utils.hf_logger import log_inference_data
 from utils.load import load_image
 from agents.ensemble_team import EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent
forensics/__init__.py CHANGED

@@ -4,6 +4,7 @@ from .ela import ELA
 from .gradient import gradient_processing
 from .minmax import minmax_process
 from .wavelet import noise_estimation
+from .jpeg_compression import estimate_qf

 __all__ = [
     'bit_plane_extractor',
@@ -11,5 +12,6 @@ __all__ = [
     # 'exif_full_dump',
     'gradient_processing',
     'minmax_process',
-    'noise_estimation'
-]
+    'noise_estimation',
+    'estimate_qf'
+]
forensics/jpeg_compression.py ADDED

@@ -0,0 +1,157 @@
+import cv2 as cv
+import numpy as np
+from PIL import Image
+
+DCT_SIZE = 8
+TABLE_SIZE = DCT_SIZE ** 2
+# Standard JPEG zig-zag scan order for an 8x8 DCT block, as (row, col) pairs.
+ZIG_ZAG = [
+    [0, 0],
+    [0, 1],
+    [1, 0],
+    [2, 0],
+    [1, 1],
+    [0, 2],
+    [0, 3],
+    [1, 2],
+    [2, 1],
+    [3, 0],
+    [4, 0],
+    [3, 1],
+    [2, 2],
+    [1, 3],
+    [0, 4],
+    [0, 5],
+    [1, 4],
+    [2, 3],
+    [3, 2],
+    [4, 1],
+    [5, 0],
+    [6, 0],
+    [5, 1],
+    [4, 2],
+    [3, 3],
+    [2, 4],
+    [1, 5],
+    [0, 6],
+    [0, 7],
+    [1, 6],
+    [2, 5],
+    [3, 4],
+    [4, 3],
+    [5, 2],
+    [6, 1],
+    [7, 0],
+    [7, 1],
+    [6, 2],
+    [5, 3],
+    [4, 4],
+    [3, 5],
+    [2, 6],
+    [1, 7],
+    [2, 7],
+    [3, 6],
+    [4, 5],
+    [5, 4],
+    [6, 3],
+    [7, 2],
+    [7, 3],
+    [6, 4],
+    [5, 5],
+    [4, 6],
+    [3, 7],
+    [4, 7],
+    [5, 6],
+    [6, 5],
+    [7, 4],
+    [7, 5],
+    [6, 6],
+    [5, 7],
+    [6, 7],
+    [7, 6],
+    [7, 7],
+]
+
+
+def compress_jpg(image, quality, color=True):
+    """Compress an image to JPEG at the specified quality and decode it back.
+
+    Args:
+        image: Input image (PIL RGB image or NumPy array)
+        quality: JPEG compression quality (1-100)
+        color: Whether to preserve color (BGR format)
+
+    Returns:
+        np.ndarray: Decompressed image in BGR or grayscale format
+    """
+    # Convert the input to an OpenCV array (BGR when color is requested)
+    img_np = np.array(image)
+    if color:
+        img_np = cv.cvtColor(img_np, cv.COLOR_RGB2BGR)
+
+    _, buffer = cv.imencode(".jpg", img_np, [cv.IMWRITE_JPEG_QUALITY, quality])
+    return cv.imdecode(buffer, cv.IMREAD_COLOR if color else cv.IMREAD_GRAYSCALE)
+
+
+def loss_curve(image: Image.Image, qualities=tuple(range(1, 101)), normalize=True):
+    """Calculate the JPEG compression loss curve used for quality estimation.
+
+    Args:
+        image: Input PIL image (RGB format)
+        qualities: Quality values to test (1-100)
+        normalize: Whether to normalize the output curve
+
+    Returns:
+        np.ndarray: Mean absolute difference values across quality levels
+    """
+    # Work on a grayscale copy for the recompression test
+    img_np = np.array(image)
+    if len(img_np.shape) == 3:
+        x = cv.cvtColor(img_np, cv.COLOR_RGB2GRAY)
+    else:
+        x = img_np
+
+    c = np.array(
+        [cv.mean(cv.absdiff(compress_jpg(x, q, False), x))[0] for q in qualities]
+    )
+    if normalize:
+        c = cv.normalize(c, None, 0, 1, cv.NORM_MINMAX).flatten()
+    return c
+
+
+def estimate_qf(image):
+    """Estimate the JPEG quality factor as the quality with minimal recompression loss."""
+    # Qualities start at 1, so shift the zero-based argmin index
+    return int(np.argmin(loss_curve(image))) + 1
+
+
+def get_tables(quality):
+    """Return the standard luma and chroma quantization tables scaled to the given quality."""
+    luma = np.array(
+        [
+            [16, 11, 10, 16, 24, 40, 51, 61],
+            [12, 12, 14, 19, 26, 58, 60, 55],
+            [14, 13, 16, 24, 40, 57, 69, 56],
+            [14, 17, 22, 29, 51, 87, 80, 62],
+            [18, 22, 37, 56, 68, 109, 103, 77],
+            [24, 35, 55, 64, 81, 104, 113, 92],
+            [49, 64, 78, 87, 103, 121, 120, 101],
+            [72, 92, 95, 98, 112, 100, 103, 99],
+        ]
+    )
+    chroma = np.array(
+        [
+            [17, 18, 24, 47, 99, 99, 99, 99],
+            [18, 21, 26, 66, 99, 99, 99, 99],
+            [24, 26, 56, 99, 99, 99, 99, 99],
+            [47, 66, 99, 99, 99, 99, 99, 99],
+            [99, 99, 99, 99, 99, 99, 99, 99],
+            [99, 99, 99, 99, 99, 99, 99, 99],
+            [99, 99, 99, 99, 99, 99, 99, 99],
+            [99, 99, 99, 99, 99, 99, 99, 99],
+        ]
+    )
+    # IJG-style scaling: map quality (1-100) to a percentage scale factor
+    quality = np.clip(quality, 1, 100)
+    if quality < 50:
+        quality = 5000 / quality
+    else:
+        quality = 200 - quality * 2
+    tables = np.concatenate((luma[:, :, np.newaxis], chroma[:, :, np.newaxis]), axis=2)
+    tables = (tables * quality + 50) / 100
+    return np.clip(tables, 1, 255).astype(int)
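
For context, a minimal usage sketch of the estimator added by this commit; the input path and print labels below are placeholders, not part of the commit:

from PIL import Image

from forensics.jpeg_compression import estimate_qf, get_tables

# Hypothetical input path; any RGB image works
img = Image.open("example.jpg").convert("RGB")

# Estimated JPEG quality factor (1-100) via the recompression loss curve
qf = estimate_qf(img)
print("estimated QF:", qf)

# Scaled 8x8 quantization tables at that quality: channel 0 is luma, 1 is chroma
tables = get_tables(qf)
print(tables[:, :, 0])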