clement-pages committed on
Commit
03a5c9f
·
1 Parent(s): 1989564

add rttm component to the demo

Browse files

To show that produced annotations can be saved for later

Files changed (1) hide show
  1. app.py +20 -3
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import gradio as gr
2
  from gryannote_audio import AudioLabeling
 
3
  from pyannote.audio import Pipeline
4
  import os
5
 
@@ -8,7 +9,11 @@ def apply_pipeline(audio):
8
  pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", use_auth_token=os.environ["HF_TOKEN"])
9
  annotations = pipeline(audio)
10
 
11
- return (audio, annotations)
 
 
 
 
12
 
13
 
14
  with gr.Blocks() as demo:
@@ -28,7 +33,7 @@ with gr.Blocks() as demo:
28
  "To use the component, start by loading or recording audio."
29
  "Then apply the diarization pipeline (here [pyannote/speaker-diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1))"
30
  "or double-click directly on the waveform. The annotations produced can be edited."
31
- "You can also use keyboard shortcuts to speed things up!"
32
  )
33
  gr.Markdown()
34
  gr.Markdown()
@@ -38,10 +43,14 @@ with gr.Blocks() as demo:
38
  type="filepath",
39
  interactive=True,
40
  )
 
41
  gr.Markdown()
42
  gr.Markdown()
 
43
  run_btn = gr.Button("Run pipeline")
44
 
 
 
45
  gr.Markdown(
46
  """| Shortcut | Action |
47
  | --------------------------------------------- | --------------------------------------------------------------------- |
@@ -66,7 +75,15 @@ with gr.Blocks() as demo:
66
  run_btn.click(
67
  fn=apply_pipeline,
68
  inputs=audio_labeling,
69
- outputs=audio_labeling,
 
 
 
 
 
 
 
 
70
  )
71
 
72
 
 
1
  import gradio as gr
2
  from gryannote_audio import AudioLabeling
3
+ from gryannote_rttm import RTTM
4
  from pyannote.audio import Pipeline
5
  import os
6
 
 
9
  pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", use_auth_token=os.environ["HF_TOKEN"])
10
  annotations = pipeline(audio)
11
 
12
+ return ((audio, annotations), (audio, annotations))
13
+
14
+
15
+ def update_annotations(data):
16
+ return rttm.on_edit(data)
17
 
18
 
19
  with gr.Blocks() as demo:
 
33
  "To use the component, start by loading or recording audio."
34
  "Then apply the diarization pipeline (here [pyannote/speaker-diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1))"
35
  "or double-click directly on the waveform. The annotations produced can be edited."
36
+ "You can also use keyboard shortcuts to speed things up! Finally, produced annotations can be saved by clicking on the download button in the RTTM component."
37
  )
38
  gr.Markdown()
39
  gr.Markdown()
 
43
  type="filepath",
44
  interactive=True,
45
  )
46
+
47
  gr.Markdown()
48
  gr.Markdown()
49
+
50
  run_btn = gr.Button("Run pipeline")
51
 
52
+ rttm = RTTM()
53
+
54
  gr.Markdown(
55
  """| Shortcut | Action |
56
  | --------------------------------------------- | --------------------------------------------------------------------- |
 
75
  run_btn.click(
76
  fn=apply_pipeline,
77
  inputs=audio_labeling,
78
+ outputs=[audio_labeling, rttm],
79
+ )
80
+
81
+ audio_labeling.edit(
82
+ fn=update_annotations,
83
+ inputs=audio_labeling,
84
+ outputs=rttm,
85
+ preprocess=False,
86
+ postprocess=False,
87
  )
88
 
89