TM9450 committed on
Commit
59b13b0
·
1 Parent(s): 0294b73

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -1,14 +1,22 @@
1
- import streamlit as st
2
  from transformers import pipeline
3
 
4
  vision_classifier = pipeline(task="image-classification")
 
5
 
6
- text = st.text_area('Enter a link to an image:')
 
 
7
 
8
- if text:
9
- result = vision_classifier(images=text)
10
- st.text("\n".join([f"Class {d['label']} with score {round(d['score'], 4)}" for d in result]))
11
-
12
- #result = vision_classifier(images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
13
- #print("\n".join([f"Class {d['label']} with score {round(d['score'], 4)}" for d in result]))
14
- #st.text("\n".join([f"Class {d['label']} with score {round(d['score'], 4)}" for d in result]))
 
 
 
 
 
 
 
 
1
  from transformers import pipeline
2
 
3
  vision_classifier = pipeline(task="image-classification")
4
+ result = vision_classifier(images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
5
 
6
+ from transformers import AutoFeatureExtractor, AutoModelForImageClassification
7
+ import torch
8
+ from datasets import load_dataset
9
 
10
+ dataset = load_dataset("huggingface/cats-image")
11
+ image = dataset["test"]["image"][0]
12
+
13
+ feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
14
+ model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224")
15
+
16
+ inputs = feature_extractor(image, return_tensors="pt")
17
+
18
+ with torch.no_grad():
19
+ logits = model(**inputs).logits
20
+
21
+ predicted_label = logits.argmax(-1).item()
22
+ print(model.config.id2label[predicted_label])