tlemagueresse committed · Commit f49da88 · 1 Parent(s): b55aab5
Import fast model
app.py
CHANGED
@@ -1,9 +1,58 @@
 import gradio as gr
-… (remaining lines of the previous app.py removed; their contents are not shown in this view)
+from huggingface_hub import hf_hub_download
+import importlib.util
+import os
+import sys
 
+# Step 1: Dynamically load the model file
+repo_id = "tlmk22/QuefrencyGuardian"
+model_path = hf_hub_download(repo_id=repo_id, filename="model.py")
+model_dir = os.path.dirname(model_path)
 
+# Add downloaded path to sys.path for Python module recognition
+if model_dir not in sys.path:
+    sys.path.append(model_dir)
 
+# Load the model dynamically
+spec = importlib.util.spec_from_file_location("model", model_path)
+model_module = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(model_module)
 
+# Load FastModelHuggingFace class
+FastModelHuggingFace = model_module.FastModelHuggingFace
+
+# Step 2: Load the pre-trained model (dynamically from HuggingFace Hub)
+fast_model = FastModelHuggingFace.from_pretrained(repo_id)
+
+# Step 3: Define a prediction function
+map_labels = {0: "chainsaw", 1: "environment"}  # Label mapping
+
+
+def predict_audio(file):
+    """
+    Predict if a given audio file contains chainsaw activity or not.
+    File: Input WAV file (uploaded via Gradio).
+    """
+    prediction = fast_model.predict(file, device="cpu")  # Assume CPU inference
+    predicted_label = map_labels[prediction[0]]
+    return f"Prediction: {predicted_label}"
+
+
+# Step 4: Build Gradio Interface
+
+# Define Gradio app elements
+drag_and_drop_input = gr.Audio(type="filepath", label="Upload WAV File")
+output_text = gr.Textbox(label="Prediction Result")
+
+# Create Gradio Application
+demo = gr.Interface(
+    fn=predict_audio,
+    inputs=drag_and_drop_input,
+    outputs=output_text,
+    title="Quefrency Guardian: Chainsaw Noise Detector",
+    description="Drag and drop a .wav audio file to predict whether it contains chainsaw noise or background environment sounds.",
+)
+
+# Launch App
+if __name__ == "__main__":
+    demo.launch()
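For reference, a minimal way to exercise the committed app.py outside the Spaces runtime is sketched below. This is an assumption-laden sketch, not part of the commit: it assumes gradio and huggingface_hub are installed locally, that the file above is saved as app.py, and that "sample.wav" is a hypothetical recording on disk. Importing app runs the model download and setup code from the Hub, but does not start the web UI because demo.launch() sits behind the __main__ guard.

# Local smoke test (sketch, not part of this commit).
# Assumptions: app.py saved locally, gradio and huggingface_hub installed,
# "sample.wav" is a hypothetical WAV file on disk.
from app import predict_audio  # import triggers hf_hub_download and model setup

print(predict_audio("sample.wav"))  # e.g. "Prediction: chainsaw"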