Upload folder using huggingface_hub

Files changed:
- .github/workflows/update_space.yml    +28 -0
- README.md                              +3 -9
- app.py                                +69 -0
- best_model.pt                          +3 -0
- examples/guess1.jpg                    +0 -0
- examples/guess2.jpg                    +0 -0
- gradio_cached_examples/16/indices.csv  +2 -0
- gradio_cached_examples/16/log.csv      +3 -0
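A commit like this one can be created programmatically with the huggingface_hub client. Below is a minimal sketch; the Space id and local folder path are hypothetical placeholders, not values taken from this commit.

# Sketch: upload a local folder to a Space with huggingface_hub.
# "username/dogs-breed-classifier" and "./space" are placeholders.
from huggingface_hub import HfApi

api = HfApi()  # uses the token saved by huggingface_hub.login() or the HF_TOKEN env var
api.upload_folder(
    folder_path="./space",                      # local folder holding app.py, best_model.pt, examples/, ...
    repo_id="username/dogs-breed-classifier",   # placeholder Space id
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)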
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.9'
+
+      - name: Install Gradio
+        run: python -m pip install -r requirements.txt
+
+      - name: Log in to Hugging Face
+        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+      - name: Deploy to Spaces
+        run: cd gradio/ && gradio deploy
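Note that the install step above expects a requirements.txt at the repository root, and no such file is part of this upload. Judging from the imports in app.py, a hypothetical minimal version might list:

# hypothetical requirements.txt (not included in this commit)
gradio
torch
torchvision
lightning
numpy
Pillow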
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-emoji: 💻
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 5.6.0
+title: echo-chatbot
 app_file: app.py
-
+sdk: gradio
+sdk_version: 4.44.1
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,69 @@
+import os
+import torch
+import numpy as np
+import lightning as pl
+import gradio as gr
+from PIL import Image
+from torchvision import transforms
+from timeit import default_timer as timer
+from torch.nn import functional as F
+
+torch.set_float32_matmul_precision('medium')
+device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+torch.set_default_device(device=device)
+torch.autocast(enabled=True, dtype='float16', device_type='cuda')
+
+pl.seed_everything(123, workers=True)
+
+TEST_TRANSFORMS = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
+class_labels = ['Beagle', 'Boxer', 'Bulldog', 'Dachshund', 'German_Shepherd', 'Golden_Retriever', 'Labrador_Retriever', 'Poodle', 'Rottweiler', 'Yorkshire_Terrier']
+
+
+# Model
+model = torch.jit.load('best_model.pt').to(device)
+
+@torch.no_grad()
+def predict_fn(img: Image):
+    start_time = timer()
+    try:
+        # img = np.array(img)
+        # print(img)
+        img = TEST_TRANSFORMS(img).to(device)
+        # print(type(img), img.shape)
+        logits = model(img.unsqueeze(0))
+        probabilities = F.softmax(logits, dim=-1)
+        # print(torch.topk(probabilities, k=2))
+        y_pred = probabilities.argmax(dim=-1).item()
+        confidence = probabilities[0][y_pred].item()
+        predicted_label = class_labels[y_pred]
+        # print(confidence, predicted_label)
+        pred_time = round(timer() - start_time, 5)
+        res = {f"Title: {predicted_label}": confidence}
+        return (res, pred_time)
+    except Exception as e:
+        print(f"error:: {e}")
+        gr.Error("An error occured 💥!", duration=5)
+        return ({f"Title ☠️": 0.0}, 0.0)
+
+
+
+
+gr.Interface(
+    fn=predict_fn,
+    inputs=gr.Image(type='pil'),
+    outputs=[
+        gr.Label(num_top_classes=1, label="Predictions"),  # what are the outputs?
+        gr.Number(label="Prediction time (s)")
+    ],
+    examples=[['examples/' + i] for i in os.listdir(os.path.join(os.path.dirname(__file__), 'examples'))],
+    title="Dog Breeds Classifier 🐈",
+    description="CNN-based Architecture for Fast and Accurate DogsBreed Classifier",
+    article="Created by muthukamalan.m ❤️",
+    cache_examples=True,
+).launch(share=False, debug=False)
+
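Since app.py loads best_model.pt with torch.jit.load, the checkpoint is expected to be a TorchScript archive rather than a plain state_dict. A minimal sketch of how such a file could be produced is below; the resnet18 backbone is only a stand-in, as the actual architecture behind best_model.pt is not part of this commit.

# Sketch: export a classifier to a TorchScript archive readable by torch.jit.load.
# The resnet18 backbone is a hypothetical placeholder for the trained network.
import torch
from torchvision import models

net = models.resnet18(weights=None)                # placeholder backbone
net.fc = torch.nn.Linear(net.fc.in_features, 10)   # 10 outputs, matching class_labels in app.py
net.eval()

example = torch.randn(1, 3, 224, 224)              # same shape TEST_TRANSFORMS produces
scripted = torch.jit.trace(net, example)           # trace to TorchScript
scripted.save("best_model.pt")                     # app.py then loads it with torch.jit.load('best_model.pt')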
best_model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32a2bf828456af508eab2d44f0305883e779380bb33a9b90004381c68b64ad51
+size 1024905
examples/guess1.jpg
ADDED
(binary image file; no text diff)

examples/guess2.jpg
ADDED
(binary image file; no text diff)
gradio_cached_examples/16/indices.csv
ADDED
@@ -0,0 +1,2 @@
+1
+0
gradio_cached_examples/16/log.csv
ADDED
@@ -0,0 +1,3 @@
+Predictions,Prediction time (s),flag,username,timestamp
+"{""label"": ""Title \u2620\ufe0f"", ""confidences"": [{""label"": ""Title \u2620\ufe0f"", ""confidence"": 0.0}]}",0.0,,,2024-11-20 16:28:01.033882
+"{""label"": ""Title \u2620\ufe0f"", ""confidences"": [{""label"": ""Title \u2620\ufe0f"", ""confidence"": 0.0}]}",0.0,,,2024-11-20 16:28:04.131286