import torch
import gradio as gr
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

# Load the saved image processor and fine-tuned classification model
image_processor = AutoFeatureExtractor.from_pretrained("saved_model_files")
model = AutoModelForImageClassification.from_pretrained("saved_model_files")

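# Class labels, assumed to match the index order of the model's output logits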
labels = ['affected', 'destroyed', 'no_damage']

def classify(im):
    # Preprocess the input image into model-ready pixel values
    features = image_processor(im, return_tensors="pt")
    # Run the classifier without tracking gradients and take the raw logits
    with torch.no_grad():
        logits = model(**features).logits
    # Convert logits to per-class probabilities
    probs = torch.nn.functional.softmax(logits, dim=-1)[0].numpy()
    # Map each label to its predicted confidence
    return {label: float(probs[i]) for i, label in enumerate(labels)}

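# Gradio UI: accept an image and display the per-class confidences as a label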
interface = gr.Interface(
    fn=classify,
    inputs="image",
    outputs="label",
)

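# share=True creates a temporary public link; debug=True keeps the process attached and prints errors to the console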
interface.launch(share=True, debug=True)