tlemagueresse committed on
Commit
cf847e0
·
1 Parent(s): 94c0845

add examples

Browse files
Files changed (1) hide show
  1. app.py +8 -14
app.py CHANGED
@@ -8,23 +8,14 @@ import sys
8
  repo_id = "tlmk22/QuefrencyGuardian"
9
  model_path = hf_hub_download(repo_id=repo_id, filename="model.py")
10
  model_dir = os.path.dirname(model_path)
11
-
12
- # Add downloaded path to sys.path for Python module recognition
13
  if model_dir not in sys.path:
14
  sys.path.append(model_dir)
15
-
16
- # Load the model dynamically
17
  spec = importlib.util.spec_from_file_location("model", model_path)
18
  model_module = importlib.util.module_from_spec(spec)
19
  spec.loader.exec_module(model_module)
20
-
21
- # Load FastModelHuggingFace class
22
  FastModelHuggingFace = model_module.FastModelHuggingFace
23
-
24
- # Step 2: Load the pre-trained model (dynamically from HuggingFace Hub)
25
  fast_model = FastModelHuggingFace.from_pretrained(repo_id)
26
 
27
- # Step 3: Define a prediction function
28
  map_labels = {0: "chainsaw", 1: "environment"} # Label mapping
29
 
30
 
@@ -33,26 +24,29 @@ def predict_audio(file):
33
  Predict if a given audio file contains chainsaw activity or not.
34
  File: Input WAV file (uploaded via Gradio).
35
  """
36
- prediction = fast_model.predict(file, device="cpu") # Assume CPU inference
37
  predicted_label = map_labels[prediction[0]]
38
  return f"Prediction: {predicted_label}"
39
 
40
 
41
- # Step 4: Build Gradio Interface
 
 
 
 
 
42
 
43
- # Define Gradio app elements
44
  drag_and_drop_input = gr.Audio(type="filepath", label="Upload WAV File")
45
  output_text = gr.Textbox(label="Prediction Result")
46
 
47
- # Create Gradio Application
48
  demo = gr.Interface(
49
  fn=predict_audio,
50
  inputs=drag_and_drop_input,
51
  outputs=output_text,
 
52
  title="Quefrency Guardian: Chainsaw Noise Detector",
53
  description="Drag and drop a .wav audio file to predict whether it contains chainsaw noise or background environment sounds.",
54
  )
55
 
56
- # Launch App
57
  if __name__ == "__main__":
58
  demo.launch()
 
# --- Model bootstrap --------------------------------------------------------
# The model's Python definition lives on the Hugging Face Hub, not in this
# repo, so we download `model.py` and import it dynamically at startup.
repo_id = "tlmk22/QuefrencyGuardian"

model_path = hf_hub_download(repo_id=repo_id, filename="model.py")
model_dir = os.path.dirname(model_path)

# Make the cache directory importable so relative imports inside model.py
# (if any) resolve normally.
if model_dir not in sys.path:
    sys.path.append(model_dir)

# Execute the downloaded file as a module named "model".
spec = importlib.util.spec_from_file_location("model", model_path)
model_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(model_module)

# Pull the model class out of the dynamic module and load its weights.
FastModelHuggingFace = model_module.FastModelHuggingFace
fast_model = FastModelHuggingFace.from_pretrained(repo_id)

# Class index -> human-readable label.
map_labels = {0: "chainsaw", 1: "environment"}
20
 
21
 
 
def predict_audio(file):
    """Classify an uploaded WAV file as chainsaw noise or background sound.

    Args:
        file: Filesystem path to the uploaded WAV file (Gradio ``filepath``
            audio input), or ``None`` when the user submitted without a file.

    Returns:
        A human-readable prediction string.
    """
    # Gradio passes None when no file was uploaded; fail gracefully instead
    # of crashing inside the model's predict().
    if file is None:
        return "Please upload a WAV file first."
    prediction = fast_model.predict(file, device="cpu")  # CPU-only inference
    predicted_label = map_labels[prediction[0]]
    return f"Prediction: {predicted_label}"
30
 
31
 
# Sample clips bundled with the Space, offered as one-click examples.
example_files = [
    "example1.wav",
    "example2.wav",
]

# --- Gradio UI --------------------------------------------------------------
drag_and_drop_input = gr.Audio(type="filepath", label="Upload WAV File")
output_text = gr.Textbox(label="Prediction Result")

demo = gr.Interface(
    fn=predict_audio,
    inputs=drag_and_drop_input,
    outputs=output_text,
    examples=example_files,
    title="Quefrency Guardian: Chainsaw Noise Detector",
    description="Drag and drop a .wav audio file to predict whether it contains chainsaw noise or background environment sounds.",
)

# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()