atlury committed
Commit 0d5e9d4 · verified · 1 Parent(s): db520f8

Update app.py

Files changed (1)
  1. app.py +45 -40
app.py CHANGED
@@ -1,47 +1,52 @@
- import gradio as gr
- from ultralytics import YOLO
- import cv2
- import numpy as np
  import os
- import requests
  import torch

- # Ensure the model file is in the correct location
- model_path = "yolov8x-doclaynet-epoch64-imgsz640-initiallr1e-4-finallr1e-5.pt"
- if not os.path.exists(model_path):
-     # Download the model file if it doesn't exist
-     model_url = "https://huggingface.co/DILHTWD/documentlayoutsegmentation_YOLOv8_ondoclaynet/resolve/main/yolov8x-doclaynet-epoch64-imgsz640-initiallr1e-4-finallr1e-5.pt"
-     response = requests.get(model_url)
-     with open(model_path, "wb") as f:
-         f.write(response.content)
-
- # Load the document segmentation model on CPU
- device = torch.device('cpu')
- docseg_model = YOLO(model_path).to(device)
-
- def process_image(image):
-     # Convert image to the format YOLO model expects
-     image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
-     results = docseg_model(image)
-
-     # Extract annotated image from results
-     annotated_img = results[0].plot()
-     annotated_img = cv2.cvtColor(annotated_img, cv2.COLOR_BGR2RGB)
-
-     # Prepare detected areas and labels as text output
-     detected_areas_labels = "\n".join(
-         [f"{box.label}: {box.conf:.2f}" for box in results[0].boxes]
      )

-     return annotated_img, detected_areas_labels

- # Define the Gradio interface
- interface = gr.Interface(
-     fn=process_image,
-     inputs=gr.Image(type="pil"),
-     outputs=[gr.Image(type="pil", label="Annotated Image"),
-              gr.Textbox(label="Detected Areas and Labels")]
- )

- if __name__ == "__main__":
-     interface.launch()
+ import spaces
+ import datetime
  import os
+ import subprocess
  import torch
+ import gradio as gr

+ CUSTOM_CSS = """
+ #output_box textarea {
+     font-family: IBM Plex Mono, ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
+ }
+ """
+
+ zero = torch.Tensor([0]).cuda()
+ print(zero.device) # <-- 'cpu' 🤔
+
+ @spaces.GPU
+ def run_gpu() -> str:
+     print(zero.device) # <-- 'cuda:0' 🤗
+     output: str = ""
+     try:
+         output = subprocess.check_output(["nvidia-smi"], text=True)
+     except FileNotFoundError:
+         output = "nvidia-smi failed"
+     comment = (
+         datetime.datetime.now().replace(microsecond=0).isoformat().replace("T", " ")
      )
+     return f"# {comment}\n\n{output}"
+
+ def run(check: bool) -> str:
+     if check:
+         return run_gpu()
+     else:
+         comment = (
+             datetime.datetime.now().replace(microsecond=0).isoformat().replace("T", " ")
+         )
+         return f"# {comment}\n\nThis is running on CPU\n\nClick on 'Run on GPU' below to move to GPU instantly and run nvidia-smi"
+
+ output = gr.Textbox(
+     label="Command Output", max_lines=32, elem_id="output_box", value=run(False)
+ )

+ with gr.Blocks(css=CUSTOM_CSS) as demo:
+     gr.Markdown("#### `zero-gpu`: how to run on serverless GPU for free on Spaces 🔥")

+     output.render()
+
+     check = gr.Checkbox(label="Run on GPU")
+
+     check.change(run, inputs=[check], outputs=output, every=1)

+ demo.queue().launch(show_api=False)