SakibRumu
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -17,20 +17,30 @@ model = YOLO("/home/user/app/best.pt") # আপনি যেই path এ best.p
|
|
17 |
|
18 |
# Frame processing function
|
19 |
def process_frame(frame):
|
20 |
-
|
|
|
|
|
|
|
21 |
img_tensor = torch.from_numpy(img).permute(2, 0, 1).float() / 255.0
|
22 |
img_tensor = img_tensor.unsqueeze(0)
|
23 |
|
|
|
24 |
results = model(img_tensor, augment=False)
|
25 |
-
|
|
|
|
|
26 |
|
27 |
extracted_texts = []
|
28 |
confidences = []
|
29 |
|
30 |
-
for det in
|
31 |
-
|
32 |
if conf > 0.5:
|
33 |
-
|
|
|
|
|
|
|
|
|
34 |
cls = int(cls)
|
35 |
|
36 |
label_map = {0: "Analog", 1: "Digital", 2: "Non-LP"}
|
@@ -51,6 +61,7 @@ def process_frame(frame):
|
|
51 |
|
52 |
return frame, "\n".join(extracted_texts), ", ".join(confidences)
|
53 |
|
|
|
54 |
# Input handler
|
55 |
def process_input(input_file):
|
56 |
file_path = input_file.name
|
|
|
17 |
|
18 |
# Frame processing function
|
19 |
def process_frame(frame):
|
20 |
+
# Resize image to 640x640
|
21 |
+
frame_resized = cv2.resize(frame, (640, 640))
|
22 |
+
|
23 |
+
img = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)
|
24 |
img_tensor = torch.from_numpy(img).permute(2, 0, 1).float() / 255.0
|
25 |
img_tensor = img_tensor.unsqueeze(0)
|
26 |
|
27 |
+
# Run inference with the YOLO model (no need to manually apply nms)
|
28 |
results = model(img_tensor, augment=False)
|
29 |
+
|
30 |
+
# Extract results (list of detections)
|
31 |
+
detections = results.xywh[0] # YOLO's detection results
|
32 |
|
33 |
extracted_texts = []
|
34 |
confidences = []
|
35 |
|
36 |
+
for det in detections:
|
37 |
+
x_center, y_center, width, height, conf, cls = det.tolist()
|
38 |
if conf > 0.5:
|
39 |
+
# Convert from YOLO format to bounding box format
|
40 |
+
x1 = int((x_center - width / 2) * 640)
|
41 |
+
y1 = int((y_center - height / 2) * 640)
|
42 |
+
x2 = int((x_center + width / 2) * 640)
|
43 |
+
y2 = int((y_center + height / 2) * 640)
|
44 |
cls = int(cls)
|
45 |
|
46 |
label_map = {0: "Analog", 1: "Digital", 2: "Non-LP"}
|
|
|
61 |
|
62 |
return frame, "\n".join(extracted_texts), ", ".join(confidences)
|
63 |
|
64 |
+
|
65 |
# Input handler
|
66 |
def process_input(input_file):
|
67 |
file_path = input_file.name
|