Update README.md
README.md CHANGED
@@ -64,44 +64,56 @@ pip install transformers torch pillow gradio
 
 ```python
 import gradio as gr
-from transformers import AutoImageProcessor,
+from transformers import AutoImageProcessor, AutoModelForImageClassification
 from PIL import Image
 import torch
 
-# Load model and processor
+# Load model and processor from the Hugging Face Hub
 model_name = "prithivMLmods/Bone-Fracture-Detection"
-model =
+model = AutoModelForImageClassification.from_pretrained(model_name)
 processor = AutoImageProcessor.from_pretrained(model_name)
 
-# ID to label mapping
-id2label = {
-    "0": "Fractured",
-    "1": "Not Fractured"
-}
-
 def detect_fracture(image):
+    """
+    Takes a NumPy image array, processes it, and returns the model's prediction.
+    """
+    # Convert NumPy array to a PIL Image
     image = Image.fromarray(image).convert("RGB")
+
+    # Process the image and prepare it as input for the model
     inputs = processor(images=image, return_tensors="pt")
 
+    # Perform inference without calculating gradients
     with torch.no_grad():
         outputs = model(**inputs)
         logits = outputs.logits
+
+    # Apply softmax to get probabilities and convert to a list
     probs = torch.nn.functional.softmax(logits, dim=1).squeeze().tolist()
 
-
+    # Create a dictionary of labels and their corresponding probabilities
+    # This now correctly uses the labels from the model's configuration
+    prediction = {model.config.id2label[i]: round(probs[i], 3) for i in range(len(probs))}
+
     return prediction
 
-# Gradio Interface
+# Create the Gradio Interface
 iface = gr.Interface(
     fn=detect_fracture,
-    inputs=gr.Image(type="numpy"),
-    outputs=gr.Label(num_top_classes=2, label="
-    title="Bone
-    description="Upload a bone X-ray image to detect if there is a fracture."
+    inputs=gr.Image(type="numpy", label="Upload Bone X-ray"),
+    outputs=gr.Label(num_top_classes=2, label="Detection Result"),
+    title="🔬 Bone Fracture Detection",
+    description="Upload a bone X-ray image to detect if there is a fracture. The model will return the probability for 'Fractured' and 'Not Fractured'.",
+    examples=[
+        ["fractured_example.png"],
+        ["not_fractured_example.png"]
+    ] # Note: You would need to have these image files in the same directory for the examples to work.
 )
 
+# Launch the app
 if __name__ == "__main__":
     iface.launch()
+
 ```
 
 ---
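The substantive change in this commit is that the model is now loaded with `AutoModelForImageClassification` and the class labels are read from the checkpoint's own `config.id2label` instead of a hardcoded dictionary. A minimal standalone sketch of that pattern is shown below for checking the checkpoint outside the Gradio UI; `sample_xray.png` is a placeholder filename and not a file that ships with this Space.

```python
# Standalone sanity check of the updated snippet (no Gradio involved).
# Assumes network access to the Hugging Face Hub; "sample_xray.png" is a placeholder path.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_name = "prithivMLmods/Bone-Fracture-Detection"
model = AutoModelForImageClassification.from_pretrained(model_name)
processor = AutoImageProcessor.from_pretrained(model_name)

# The label mapping now comes from the checkpoint's configuration,
# which is expected to cover "Fractured" and "Not Fractured".
print(model.config.id2label)

# Run a single prediction on a local image.
image = Image.open("sample_xray.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=1).squeeze().tolist()
print({model.config.id2label[i]: round(p, 3) for i, p in enumerate(probs)})
```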