rsortino committed on
Commit
4e1619d
·
verified ·
1 Parent(s): ad29040

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +120 -27
app.py CHANGED
@@ -1,27 +1,120 @@
1
- from fastapi import FastAPI
2
- from dotenv import load_dotenv
3
- from tasks import text, image, audio
4
-
5
- # Load environment variables
6
- load_dotenv()
7
-
8
- app = FastAPI(
9
- title="Frugal AI Challenge API",
10
- description="API for the Frugal AI Challenge evaluation endpoints"
11
- )
12
-
13
- # Include all routers
14
- app.include_router(text.router)
15
- app.include_router(image.router)
16
- app.include_router(audio.router)
17
-
18
- @app.get("/")
19
- async def root():
20
- return {
21
- "message": "Welcome to the Frugal AI Challenge API",
22
- "endpoints": {
23
- "text": "/text - Text classification task",
24
- "image": "/image - Image classification task (coming soon)",
25
- "audio": "/audio - Audio classification task (coming soon)"
26
- }
27
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from ultralytics import YOLO
3
+
4
+
5
+ def yolov10_inference(image, model_id, image_size, conf_threshold):
6
+ model = YOLO(f'{model_id}.engine')
7
+ results = model.predict(source=image, imgsz=image_size, conf=conf_threshold)
8
+ annotated_image = results[0].plot()
9
+ return annotated_image[:, :, ::-1], None
10
+
11
+
12
def yolov10_inference_for_examples(image, model_path, image_size, conf_threshold):
    """Adapter for ``gr.Examples``: run inference, return only the image.

    ``gr.Examples`` caches a single output component, so the ``None``
    placeholder from ``yolov10_inference`` is dropped here.
    """
    annotated, _unused = yolov10_inference(image, model_path, image_size, conf_threshold)
    return annotated
16
+
17
def app():
    """Build the interactive smoke-detection UI.

    Left column: input image, model picker, inference-size and confidence
    sliders, and the run button. Right column: the annotated result. A set
    of example images is wired to the same pipeline.
    """
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                image = gr.Image(type="pil", label="Image", visible=True)
                model_id = gr.Dropdown(
                    label="Model",
                    choices=[
                        "yolov10s",
                        "yolov10m",
                        "yolov10m_ssvd_0.4",
                        "yolov10m_wsvd_0.4",
                    ],
                    value="yolov10s",
                )
                image_size = gr.Slider(
                    label="Image Size",
                    minimum=320,
                    maximum=1280,
                    step=32,
                    value=640,
                )
                conf_threshold = gr.Slider(
                    label="Confidence Threshold",
                    minimum=0.0,
                    maximum=1.0,
                    step=0.05,
                    value=0.25,
                )
                yolov10_infer = gr.Button(value="Detect Smoke")

            with gr.Column():
                output_image = gr.Image(type="numpy", label="Annotated Image", visible=True)

        # BUG FIX: yolov10_inference returns two values but only one output
        # component is registered here, which makes Gradio raise a
        # value-count error when the button is clicked. Route the click
        # through the single-output wrapper instead.
        yolov10_infer.click(
            fn=yolov10_inference_for_examples,
            inputs=[image, model_id, image_size, conf_threshold],
            outputs=[output_image],
        )

        gr.Examples(
            examples=[
                [
                    "examples/smoke1.jpg",
                    "yolov10s",
                    1280,
                    0.25,
                ],
                [
                    "examples/smoke2.jpg",
                    "yolov10s",
                    1280,
                    0.25,
                ],
                [
                    "examples/smoke3.jpg",
                    "yolov10s",
                    1280,
                    0.25,
                ],
                [
                    "examples/smoke4.jpg",
                    "yolov10s",
                    1280,
                    0.25,
                ],
                [
                    "examples/smoke5.jpg",
                    "yolov10s",
                    1280,
                    0.25,
                ],
            ],
            fn=yolov10_inference_for_examples,
            inputs=[
                image,
                model_id,
                image_size,
                conf_threshold,
            ],
            outputs=[output_image],
            cache_examples='lazy',
        )
100
+
101
# Top-level page: title, paper links, and the detection UI built by app().
# Built at import time so hosting platforms (e.g. HF Spaces) can find
# `gradio_app`; launched only when run as a script.
with gr.Blocks() as gradio_app:
    gr.HTML(
        """
        <h1 style='text-align: center'>
        YOLOv10 for early fire detection with low resource consumption
        </h1>
        """)
    gr.HTML(
        """
        <h3 style='text-align: center'>
        Original paper and code:
        <a href='https://arxiv.org/abs/2405.14458' target='_blank'>arXiv</a> | <a href='https://github.com/THU-MIG/yolov10' target='_blank'>github</a>
        </h3>
        """)
    with gr.Row():
        with gr.Column():
            app()

if __name__ == '__main__':
    gradio_app.launch()