ignore temp files
main.py CHANGED
@@ -101,12 +101,12 @@ async def classify_image(file: UploadFile = File(None)):

    inputs = model(image)

-    with torch.no_grad():
-        logits = model(**inputs).logits
-        probs = F.softmax(logits, dim=-1)
-        predicted_label_id = probs.argmax(-1).item()
-        predicted_label = model.config.id2label[predicted_label_id]
-        confidence = probs.max().item()
+    # with torch.no_grad():
+    # logits = model(**inputs).logits
+    # probs = F.softmax(logits, dim=-1)
+    # predicted_label_id = probs.argmax(-1).item()
+    # predicted_label = model.config.id2label[predicted_label_id]
+    # confidence = probs.max().item()

    # model predicts one of the 1000 ImageNet classes
    # predicted_label = logits.argmax(-1).item()
@@ -114,13 +114,13 @@ async def classify_image(file: UploadFile = File(None)):
    # logging.info("model.config.id2label[predicted_label] %s", model.config.id2label[predicted_label])
    # # print(model.config.id2label[predicted_label])
    # Find the prediction with the highest confidence using the max() function
-
+    predicted_label = max(inputs, key=lambda x: x["score"])
    # logging.info("best_prediction %s", best_prediction)
    # best_prediction2 = results[1]["label"]
    # logging.info("best_prediction2 %s", best_prediction2)

    # # Calculate the confidence score, rounded to the nearest tenth and as a percentage
-
+    confidence = round(predicted_label["score"] * 100, 1)

    # # Prepare the custom response data
    detection_result = {
@@ -186,26 +186,27 @@ async def classify_images(request: ImageUrlsRequest):
    image = Image.open(io.BytesIO(image_data))
    inputs = model(image)

-    with torch.no_grad():
-        logits = model(**inputs).logits
-        probs = F.softmax(logits, dim=-1)
-        predicted_label_id = probs.argmax(-1).item()
-        predicted_label = model.config.id2label[predicted_label_id]
-        confidence = probs.max().item()
+    # with torch.no_grad():
+    # logits = model(**inputs).logits
+    # probs = F.softmax(logits, dim=-1)
+    # predicted_label_id = probs.argmax(-1).item()
+    # predicted_label = model.config.id2label[predicted_label_id]
+    # confidence = probs.max().item()

    # model predicts one of the 1000 ImageNet classes
    # predicted_label = logits.argmax(-1).item()
    # logging.info("predicted_label", predicted_label)
    # logging.info("model.config.id2label[predicted_label] %s", model.config.id2label[predicted_label])
    # # print(model.config.id2label[predicted_label])
-
-
+    predicted_label = max(inputs, key=lambda x: x["score"])
+    # best_prediction = max(results, key=lambda x: x["score"])
    # logging.info("best_prediction %s", best_prediction)
    # best_prediction2 = results[1]["label"]
    # logging.info("best_prediction2 %s", best_prediction2)

    # # Calculate the confidence score, rounded to the nearest tenth and as a percentage
-
+    # confidence_percentage = round(best_prediction["score"] * 100, 1)
+    confidence = round(predicted_label["score"] * 100, 1)

    # # Prepare the custom response data
    detection_result = {
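
For context, the new lines treat the output of model(image) as pipeline-style results, i.e. a list of {"label": ..., "score": ...} dicts; the value is still bound to the name "inputs", but it is these prediction dicts that max(..., key=lambda x: x["score"]) iterates over. A minimal, self-contained sketch of that selection logic, assuming a transformers image-classification pipeline (the checkpoint and the classifier/top_prediction names are illustrative, not taken from the Space):

# Sketch of the top-prediction logic used in the new lines; the checkpoint
# and variable names are illustrative assumptions.
from PIL import Image
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")

def top_prediction(image: Image.Image) -> dict:
    # The pipeline returns a list of {"label": str, "score": float} dicts.
    results = classifier(image)
    # Pick the entry with the highest score (the max(..., key=...) call in the diff).
    best = max(results, key=lambda x: x["score"])
    # Confidence as a percentage rounded to the nearest tenth, as in the diff.
    confidence = round(best["score"] * 100, 1)
    return {"label": best["label"], "confidence": confidence}

if __name__ == "__main__":
    print(top_prediction(Image.open("example.jpg")))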
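
For comparison, the path the commit comments out derives the label manually from the logits (torch.no_grad, softmax, argmax, id2label). A sketch of that route against the standard transformers classes, with the processor/model setup and the checkpoint as illustrative assumptions rather than code read from the Space:

# Manual counterpart of the commented-out block: run the model, softmax the
# logits, take the argmax, and map the class id to a label.
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224")

def classify(image: Image.Image) -> dict:
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():                      # no gradients needed at inference time
        logits = model(**inputs).logits
    probs = F.softmax(logits, dim=-1)          # logits -> probabilities
    predicted_label_id = probs.argmax(-1).item()
    predicted_label = model.config.id2label[predicted_label_id]
    confidence = round(probs.max().item() * 100, 1)  # percentage, nearest tenth
    return {"label": predicted_label, "confidence": confidence}

if __name__ == "__main__":
    print(classify(Image.open("example.jpg")))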