LPX
committed on
Commit
·
5ccc3f6
1
Parent(s):
1a8be6b
🐛 fix(app): fix input-degrees bug for cases without augmentation
Browse files- modify `predict_image_with_html` function to accept `augment_method` and `rotate_degrees`
- add filter to conditionally show rotate slider
- modify `augment_image` function to accept `methods` and `rotate_degrees` parameters and apply the chosen augmentation methods accordingly
✨ feat(app): expand augmentation methods
- add new checkbox group to let user select from multiple augmentation methods such as rotate
📝 docs: update imports comment and reorganize
- add type hint comments for function return values
- import additional filters from the `PIL` library for image manipulation
- app.py +9 -6
- utils/utils.py +11 -11
app.py
CHANGED
@@ -200,15 +200,15 @@ def generate_results_html(results):
|
|
200 |
"""
|
201 |
return html_content
|
202 |
|
203 |
-
def predict_image_with_html(img, confidence_threshold,
|
204 |
-
if
|
205 |
-
img_pil, _ = augment_image(img)
|
206 |
else:
|
207 |
img_pil = img
|
208 |
img_pil, results = predict_image(img_pil, confidence_threshold)
|
209 |
html_content = generate_results_html(results)
|
210 |
return img_pil, html_content
|
211 |
-
|
212 |
with gr.Blocks() as iface:
|
213 |
gr.Markdown("# AI Generated Image / Deepfake Detection Models Evaluation")
|
214 |
|
@@ -217,8 +217,9 @@ with gr.Blocks() as iface:
|
|
217 |
image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
|
218 |
with gr.Accordion("Settings", open=False, elem_id="settings_accordion"):
|
219 |
confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
|
220 |
-
|
221 |
-
|
|
|
222 |
predict_button = gr.Button("Predict")
|
223 |
with gr.Column(scale=2):
|
224 |
with gr.Accordion("Project OpenSight - Model Evaluations & Playground", open=False, elem_id="project_accordion"):
|
@@ -228,6 +229,8 @@ with gr.Blocks() as iface:
|
|
228 |
results_html = gr.HTML(label="Model Predictions")
|
229 |
outputs = [image_output, results_html]
|
230 |
|
|
|
|
|
231 |
predict_button.click(
|
232 |
fn=predict_image_with_html,
|
233 |
inputs=inputs,
|
|
|
200 |
"""
|
201 |
return html_content
|
202 |
|
203 |
+
def predict_image_with_html(img, confidence_threshold, augment_method, rotate_degrees):
    """Optionally augment the uploaded image, run prediction, and build HTML results.

    Args:
        img: input image uploaded by the user (PIL.Image — TODO confirm against
            the `gr.Image(type='pil')` input).
        confidence_threshold: threshold in [0, 1], forwarded to `predict_image`.
        augment_method: value of the augmentation CheckboxGroup — a list of
            method names such as ["rotate", "add_noise"]; may be empty.
        rotate_degrees: rotation angle in degrees, only relevant when "rotate"
            is among the selected methods.

    Returns:
        (img_pil, html_content): the (possibly augmented) image returned by
        `predict_image`, and an HTML string summarizing model predictions.
    """
    # BUG FIX: `augment_method` comes from a CheckboxGroup, so it is a list,
    # never the string "none" — the old comparison `augment_method != "none"`
    # was always True and the augmentation path ran even with no selection.
    # A truthiness check correctly skips augmentation for an empty selection.
    if augment_method:
        img_pil, _ = augment_image(img, augment_method, rotate_degrees)
    else:
        img_pil = img
    img_pil, results = predict_image(img_pil, confidence_threshold)
    html_content = generate_results_html(results)
    return img_pil, html_content
|
211 |
+
|
212 |
with gr.Blocks() as iface:
|
213 |
gr.Markdown("# AI Generated Image / Deepfake Detection Models Evaluation")
|
214 |
|
|
|
217 |
image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
|
218 |
with gr.Accordion("Settings", open=False, elem_id="settings_accordion"):
|
219 |
confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
|
220 |
+
augment_checkboxgroup = gr.CheckboxGroup(["rotate", "add_noise", "sharpen"], label="Augmentation Methods")
|
221 |
+
rotate_slider = gr.Slider(0, 360, value=0, step=1, label="Rotate Degrees", visible=False)
|
222 |
+
inputs = [image_input, confidence_slider, augment_checkboxgroup, rotate_slider]
|
223 |
predict_button = gr.Button("Predict")
|
224 |
with gr.Column(scale=2):
|
225 |
with gr.Accordion("Project OpenSight - Model Evaluations & Playground", open=False, elem_id="project_accordion"):
|
|
|
229 |
results_html = gr.HTML(label="Model Predictions")
|
230 |
outputs = [image_output, results_html]
|
231 |
|
232 |
+
augment_checkboxgroup.change(lambda methods: gr.Slider.update(visible="rotate" in methods), inputs=[augment_checkboxgroup], outputs=[rotate_slider])
|
233 |
+
|
234 |
predict_button.click(
|
235 |
fn=predict_image_with_html,
|
236 |
inputs=inputs,
|
utils/utils.py
CHANGED
@@ -1,22 +1,22 @@
|
|
1 |
import numpy as np
|
2 |
import io
|
3 |
-
from PIL import Image
|
4 |
from torchvision import transforms
|
5 |
|
6 |
def softmax(vector):
|
7 |
e = np.exp(vector - np.max(vector)) # for numerical stability
|
8 |
return e / e.sum()
|
9 |
|
10 |
-
def augment_image(img_pil):
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
return
|
20 |
|
21 |
def convert_pil_to_bytes(image, format='JPEG'):
|
22 |
img_byte_arr = io.BytesIO()
|
|
|
1 |
import numpy as np
|
2 |
import io
|
3 |
+
from PIL import Image, ImageFilter
|
4 |
from torchvision import transforms
|
5 |
|
6 |
def softmax(vector):
    """Convert a vector of raw scores into probabilities that sum to 1."""
    # Shift by the maximum so exp() cannot overflow (numerical stability).
    peak = np.max(vector)
    exps = np.exp(vector - peak)
    return exps / exps.sum()
|
9 |
|
10 |
+
def augment_image(img_pil, methods, rotate_degrees=0):
    """Apply the selected augmentation methods to a PIL image, in order.

    Args:
        img_pil: input PIL.Image (the "add_noise" branch assumes a 3-channel
            RGB image — TODO confirm against callers).
        methods: iterable of method names among "rotate", "add_noise",
            "sharpen"; unknown names are ignored. A bare string is tolerated
            and treated as a single method.
        rotate_degrees: counter-clockwise rotation angle used by "rotate".

    Returns:
        (img_pil, img_pil): the augmented image, returned twice to preserve
        the original two-value interface expected by callers.
    """
    if isinstance(methods, str):
        # Be tolerant of a single method passed as a bare string.
        methods = [methods]
    for method in methods or []:
        if method == "rotate":
            img_pil = img_pil.rotate(rotate_degrees)
        elif method == "add_noise":
            # BUG FIX: the old code cast Gaussian noise to uint8 (wrapping
            # negative samples up to ~255) and added uint8 + uint8 (wrapping
            # on overflow) BEFORE clipping, so np.clip had no effect. Work in
            # a wider signed dtype, clip, then convert back to uint8.
            noise = np.random.normal(0, 25, img_pil.size[::-1] + (3,)).astype(np.int16)
            noisy = np.asarray(img_pil).astype(np.int16) + noise
            img_pil = Image.fromarray(np.clip(noisy, 0, 255).astype(np.uint8))
        elif method == "sharpen":
            img_pil = img_pil.filter(ImageFilter.SHARPEN)
    return img_pil, img_pil
|
20 |
|
21 |
def convert_pil_to_bytes(image, format='JPEG'):
|
22 |
img_byte_arr = io.BytesIO()
|