import os
import logging
import torch
import datetime
import requests
from google.cloud import storage
from transformers import AutoImageProcessor, AutoModelForObjectDetection, ViTImageProcessor, Swinv2ForImageClassification
from label_studio_ml.model import LabelStudioMLBase
from lxml import etree
from uuid import uuid4
from PIL import Image
from creds import get_credentials
from io import BytesIO


def generate_download_signed_url_v4(blob_name):
"""Generates a v4 signed URL for downloading a blob.
Note that this method requires a service account key file. You can not use
this if you are using Application Default Credentials from Google Compute
Engine or from the Google Cloud SDK.
"""
bucket_name = os.getenv("bucket")
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(blob_name.replace(f"gs://{bucket_name}/", ""))
url = blob.generate_signed_url(
version="v4",
# This URL is valid for 15 minutes
expiration=datetime.timedelta(minutes=15),
# Allow GET requests using this URL.
method="GET",
)
return url
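

# Illustrative use of the helper above (a sketch only; assumes the "bucket" env var
# names the GCS bucket and that service-account credentials are available):
#
#   signed_url = generate_download_signed_url_v4("gs://<bucket>/path/to/image.jpg")
#   image_bytes = requests.get(signed_url).content
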
class Model(LabelStudioMLBase):
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_credentials()
image_processor = AutoImageProcessor.from_pretrained("diegokauer/conditional-detr-coe-int")
model = AutoModelForObjectDetection.from_pretrained("diegokauer/conditional-detr-coe-int")
seg_image_processor = ViTImageProcessor.from_pretrained("diegokauer/int-pet-classifier-v2")
seg_model = Swinv2ForImageClassification.from_pretrained("diegokauer/int-pet-classifier-v2")
id2label = model.config.id2label
seg_id2label = seg_model.config.id2label
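
    # Two-stage pipeline: the conditional DETR model proposes labeled boxes, and
    # boxes labeled 'Propuesta' are additionally run through the Swinv2 classifier
    # as a multi-label (sigmoid) prediction over the configured choice values.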
    def predict(self, tasks, **kwargs):
        """This is where inference happens: the model returns
        the list of predictions based on the input list of tasks.
        """
        predictions = []
        for task in tasks:
            url = task["data"]["image"]
            response = requests.get(generate_download_signed_url_v4(url))
            print(response)
            image_data = BytesIO(response.content)
            image = Image.open(image_data)
            original_width, original_height = image.size

            with torch.no_grad():
                inputs = self.image_processor(images=image, return_tensors="pt")
                outputs = self.model(**inputs)
                target_sizes = torch.tensor([image.size[::-1]])
                results = self.image_processor.post_process_object_detection(
                    outputs, threshold=0.5, target_sizes=target_sizes
                )[0]

            result_list = []

            for score, label, box in zip(results['scores'], results['labels'], results['boxes']):
                label_id = str(uuid4())
                x, y, x2, y2 = tuple(box)
                if self.id2label[label.item()] == 'Propuesta':
                    with torch.no_grad():
                        pred_label_id = str(uuid4())
                        # Crop into a separate variable so the original image stays
                        # intact for the remaining detections in this task.
                        region = image.crop((x.item(), y.item(), x2.item(), y2.item()))
                        inputs = self.seg_image_processor(images=region, return_tensors="pt")
                        logits = self.seg_model(**inputs).logits
                        probs = torch.sigmoid(logits)  # multi-label probabilities
                        print(probs)
                        preds = probs > 0.5
                        preds = [self.seg_id2label[i] for i, pred in enumerate(preds.squeeze().tolist()) if pred]
                        preds = ["No Reportado"] if "No Reportado" in preds else preds
                        result_list.append({
                            "value": {
                                "choices": preds
                            },
                            "id": pred_label_id,
                            "from_name": "propuesta",
                            "to_name": "image",
                            "type": "choices"
                        })
                result_list.append({
                    'id': label_id,
                    'original_width': original_width,
                    'original_height': original_height,
                    'from_name': "bbox",
                    'to_name': "image",
                    'type': 'rectangle',
                    'score': score.item(),  # per-region score, visible in the editor
                    'value': {
                        'x': x.item() * 100.0 / original_width,
                        'y': y.item() * 100.0 / original_height,
                        'width': (x2 - x).item() * 100.0 / original_width,
                        'height': (y2 - y).item() * 100.0 / original_height,
                        'rotation': 0,
                    }
                })
                result_list.append({
                    'id': label_id,
                    'original_width': original_width,
                    'original_height': original_height,
                    'from_name': "label",
                    'to_name': "image",
                    'type': 'labels',
                    'score': score.item(),  # per-region score, visible in the editor
                    'value': {
                        'x': x.item() * 100.0 / original_width,
                        'y': y.item() * 100.0 / original_height,
                        'width': (x2 - x).item() * 100.0 / original_width,
                        'height': (y2 - y).item() * 100.0 / original_height,
                        'rotation': 0,
                        'labels': [self.id2label[label.item()]]
                    }
                })

            predictions.append({
                'score': results['scores'].mean().item(),  # overall prediction score, visible in the data manager columns
                'model_version': 'cdetr_v2',  # all predictions will be differentiated by model version
                'result': result_list
            })

        print(predictions)
        return predictions
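
    # The result entries above assume a labeling config roughly along these lines
    # (illustrative sketch only; the actual label and choice values come from the project):
    #
    #   <View>
    #     <Image name="image" value="$image"/>
    #     <Rectangle name="bbox" toName="image"/>
    #     <Labels name="label" toName="image">
    #       <Label value="Propuesta"/>
    #     </Labels>
    #     <Choices name="propuesta" toName="image" choice="multiple">
    #       <Choice value="No Reportado"/>
    #     </Choices>
    #   </View>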

    def fit(self, event, annotations, **kwargs):
        """This is where training happens: train your model given the list of annotations,
        then return a dict with the created links and resources.
        """
        return {'path/to/created/model': 'my/model.bin'}
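

# Minimal sketch of how a backend like this is typically served (assumes the
# label-studio-ml-backend CLI; the actual entrypoint and script name for this
# Space may differ):
#
#   label-studio-ml init coe_backend --script model.py
#   label-studio-ml start coe_backend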