import gradio as gr
from dotenv import load_dotenv
from roboflow import Roboflow
import tempfile
import os
import requests
import cv2
import shutil

# Load environment variables from the .env file
load_dotenv()
api_key = os.getenv("ROBOFLOW_API_KEY")
workspace = os.getenv("ROBOFLOW_WORKSPACE")
project_name = os.getenv("ROBOFLOW_PROJECT")
model_version = int(os.getenv("ROBOFLOW_MODEL_VERSION"))
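# Expected .env contents (placeholder values):
#   ROBOFLOW_API_KEY=your-api-key
#   ROBOFLOW_WORKSPACE=your-workspace-slug
#   ROBOFLOW_PROJECT=your-project-slug
#   ROBOFLOW_MODEL_VERSION=1
# All four variables are required; int(None) raises a TypeError, so a missing
# ROBOFLOW_MODEL_VERSION fails fast at startup.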

# Initialize Roboflow using the credentials retrieved from the secrets
rf = Roboflow(api_key=api_key)
project = rf.workspace(workspace).project(project_name)
model = project.version(model_version).model
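# 'model' wraps the hosted Roboflow inference endpoint for this project version.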

# Run object detection on a single uploaded image
def detect_objects(image):
    # Save the uploaded image to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        image.save(temp_file, format="JPEG")
        temp_file_path = temp_file.name

    try:
        # Run the prediction on the image
        predictions = model.predict(temp_file_path, confidence=60, overlap=80).json()
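        # The returned JSON typically has the shape {'predictions': [...], 'image': {...}},
        # where each prediction carries 'class', 'x', 'y', 'width', 'height' and 'confidence'.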

        # Count detected objects per class
        class_count = {}
        total_count = 0

        for prediction in predictions['predictions']:
            class_name = prediction['class']
            class_count[class_name] = class_count.get(class_name, 0) + 1
            total_count += 1

        # Build the result text from the per-class counts
        result_text = "Product Nestle\n\n"
        for class_name, count in class_count.items():
            result_text += f"{class_name}: {count}\n"
        result_text += f"\nTotal Product Nestle: {total_count}"

        # Save a copy of the image with the predictions drawn on it
        output_image_path = "/tmp/prediction.jpg"
        model.predict(temp_file_path, confidence=60, overlap=80).save(output_image_path)
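        # Note: this issues a second inference request; the object returned by the
        # first model.predict() call exposes both .json() and .save(), so it could
        # be reused to avoid the extra round trip.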
        
    except requests.exceptions.HTTPError as http_err:
        result_text = f"HTTP error occurred: {http_err}"
        output_image_path = None  # the temp file is deleted below, so don't return its path
    except Exception as err:
        result_text = f"An error occurred: {err}"
        output_image_path = None  # the temp file is deleted below, so don't return its path

    os.remove(temp_file_path)
    
    return output_image_path, result_text

# Run object detection frame by frame on an uploaded video
def detect_objects_in_video(video_path):
    temp_output_path = "/tmp/output_video.mp4"
    temp_frames_dir = tempfile.mkdtemp()

    try:
        # Open the video and read its properties
        video = cv2.VideoCapture(video_path)
        frame_rate = int(video.get(cv2.CAP_PROP_FPS))
        frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_size = (frame_width, frame_height)
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

        # VideoWriter for the annotated output video
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_video = cv2.VideoWriter(temp_output_path, fourcc, frame_rate, frame_size)
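        # Note: 'mp4v' (MPEG-4 Part 2) may not be decodable by the <video> element in
        # some browsers; re-encoding to H.264 can be necessary for in-browser playback.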

        frame_index = 0
        while True:
            ret, frame = video.read()
            if not ret:
                break

            # Write the frame to disk so it can be sent for prediction
            frame_path = os.path.join(temp_frames_dir, f"frame_{frame_index}.jpg")
            cv2.imwrite(frame_path, frame)

            # Detect objects in the frame
            predictions = model.predict(frame_path, confidence=60, overlap=80).json()
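            # Each call uploads the frame to the Roboflow API, so processing time grows
            # with the frame count; sampling every Nth frame would reduce latency and cost.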

            # Draw a bounding box and class label on the frame
            for prediction in predictions['predictions']:
                x, y, w, h = prediction['x'], prediction['y'], prediction['width'], prediction['height']
                class_name = prediction['class']
                color = (0, 255, 0)  # green (BGR)
                cv2.rectangle(frame, (int(x - w/2), int(y - h/2)), (int(x + w/2), int(y + h/2)), color, 2)
                cv2.putText(frame, class_name, (int(x - w/2), int(y - h/2 - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            # Append the annotated frame to the output video
            output_video.write(frame)
            frame_index += 1

        video.release()
        output_video.release()

        return temp_output_path, "Video processing completed successfully."
    
    except Exception as e:
        return None, f"An error occurred: {e}"
    finally:
        # Remove the temporary frame files whether processing succeeded or failed
        shutil.rmtree(temp_frames_dir, ignore_errors=True)

# Build the Gradio interface with a flexible layout
with gr.Blocks() as iface:
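    # Layout: inputs in the first column, annotated media outputs in the second,
    # and the object-count summary in the third.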
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            input_video = gr.Video(label="Input Video")
        with gr.Column():
            output_image = gr.Image(label="Detect Object")
            output_video = gr.Video(label="Output Video")
        with gr.Column():
            output_text = gr.Textbox(label="Counting Object")
    
    # Button to run image detection
    detect_image_button = gr.Button("Detect Image")
    detect_image_button.click(
        fn=detect_objects, 
        inputs=input_image, 
        outputs=[output_image, output_text]
    )

    # Button to run video detection
    detect_video_button = gr.Button("Detect Video")
    detect_video_button.click(
        fn=detect_objects_in_video, 
        inputs=input_video, 
        outputs=[output_video, output_text]
    )

# Launch the interface
iface.launch()
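# When running locally, iface.launch(share=True) would also expose a temporary public link.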