File size: 14,507 Bytes
6f523af
 
 
 
75c9c9a
 
 
6f523af
75c9c9a
9d3beef
f76a38c
 
 
 
 
 
 
 
 
 
 
 
 
 
9d3beef
8417fa3
87f1f7d
 
 
d296c7b
010fb88
6f523af
 
 
799a0f6
 
6f523af
 
010aaff
6f523af
 
 
799a0f6
010fb88
6f523af
 
11c4a5a
6f523af
4ff8afc
799a0f6
 
 
 
 
 
 
 
 
 
6f523af
4ff8afc
935d736
6f523af
 
 
 
 
 
 
 
 
 
 
 
9d3beef
703ca2c
6f523af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f76a38c
 
f482080
6f523af
f76a38c
d296c7b
 
6f523af
f482080
f76a38c
8417fa3
 
 
 
 
 
 
6f523af
8417fa3
 
 
 
 
 
 
 
 
 
 
 
d296c7b
8417fa3
 
 
 
 
 
 
 
 
 
6f523af
 
 
8417fa3
 
6f523af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75c9c9a
6f523af
 
f76a38c
6f523af
f76a38c
 
8417fa3
f76a38c
 
8417fa3
f76a38c
 
 
 
 
 
 
 
 
 
 
 
 
 
d296c7b
8417fa3
 
 
f76a38c
8417fa3
 
 
 
 
f76a38c
 
d296c7b
f76a38c
 
 
 
 
 
 
 
 
 
8417fa3
 
 
 
 
 
6f523af
8417fa3
6f523af
87f1f7d
6f523af
87f1f7d
8417fa3
 
 
 
 
 
f76a38c
8417fa3
6f523af
f482080
8417fa3
935d736
8417fa3
6f523af
 
 
4ff8afc
f76a38c
8417fa3
 
 
 
 
4c19345
8417fa3
 
 
799a0f6
8417fa3
 
 
 
 
 
 
 
f76a38c
4c19345
6f523af
 
f76a38c
f482080
6f523af
 
 
 
f482080
6f523af
 
 
 
 
c53e4c1
5c8a18f
c53e4c1
4c19345
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
from app.logger_config import (
    logger as logging,
    DEBUG
) 
import numpy as np
import gradio as gr
from fastrtc.webrtc import WebRTC
from fastrtc.utils import AdditionalOutputs
from pydub import AudioSegment
from gradio.utils import get_space
from app.supported_languages import (
    SUPPORTED_LANGS_MAP,
)
from app.ui_utils import (
    EXAMPLE_CONFIGS,
    apply_preset_if_example,
    reset_to_defaults,
    summarize_config,
    handle_additional_outputs,
    get_custom_theme,
    on_file_load,
    start_task_asr_ast,
    stop_task_fn
)
from app.utils import (
    READ_SIZE,
    generate_coturn_config,
    read_and_stream_audio,
    stop_streaming,
    raise_error
)
from app.session_utils import (
    on_load,
    on_unload,
    get_active_session_hashes,
    reset_all_active_sessions,
)

import spaces
# --------------------------------------------------------
# Initialization
# --------------------------------------------------------
# Drop any session records left over from a previous process run so the
# active-sessions table starts clean.
reset_all_active_sessions()

# Shared Gradio theme object and CSS string consumed by gr.Blocks below.
theme,css_style = get_custom_theme()


# Main Gradio UI: a three-step walkthrough (Audio -> Configuration -> Task)
# that streams a selected audio file over WebRTC and runs a streaming
# transcription/translation task on it.
with gr.Blocks(theme=theme, css=css_style) as demo:
    # Per-browser-session identifier, filled in by on_load().
    session_hash_code = gr.State()

    # Debug-only panel: shows the current session id plus a live table of
    # all active sessions, refreshed every 3 seconds.
    with gr.Accordion("DEBUG PANEL", open=False, visible=DEBUG):
        session_hash_code_box = gr.Textbox(label="Session ID", interactive=False, visible=DEBUG)
        with gr.Accordion("📊 Active Sessions Hash", open=True, visible=DEBUG):
            sessions_table = gr.DataFrame(
                headers=["session_hash_code", "file", "start_time", "status"],
                interactive=False,
                wrap=True,
                max_height=200,
            )
            gr.Timer(3.0).tick(fn=get_active_session_hashes, outputs=sessions_table)

    # Register / unregister the session when the page loads / closes.
    demo.load(fn=on_load, inputs=None, outputs=[session_hash_code, session_hash_code_box])
    demo.unload(fn=on_unload)
    # Path of the currently selected audio file; defaults to the first sample.
    active_filepath = gr.State(value=next(iter(EXAMPLE_CONFIGS)))

    with gr.Walkthrough(selected=0) as walkthrough:
        # === STEP 1: pick an audio source ===
        with gr.Step("Audio", id=0) as audio_source_step:
            gr.Markdown(
                """
                ### Step 1: Upload or Record an Audio File
                You can upload an existing file or record directly from your microphone.  
                Accepted formats: **.wav**, **.mp3**, **.flac**  
                Maximum length recommended: **60 seconds**
                """
            )

            with gr.Group():
                with gr.Column():
                    main_audio = gr.Audio(
                        label="Audio Input",
                        sources=["upload", "microphone"],
                        type="filepath",
                        interactive=True
                    )

                    with gr.Accordion("Need a quick test? Try one of the sample audios below", open=True):
                        examples = gr.Examples(
                            examples=list(EXAMPLE_CONFIGS.keys()),
                            inputs=main_audio,
                            label=None,
                            examples_per_page=3
                        )
                        gr.Markdown(
                            """
                            🔹 **english_meeting.wav** – Short business meeting in English  
                            🔹 **french_news.wav** – Excerpt from a French radio broadcast  
                            🔹 **spanish_podcast.wav** – Segment from a Spanish-language podcast  
                            """
                        )

            # Hidden until a file is loaded (visibility toggled by on_file_load).
            go_to_config = gr.Button("Go to Configuration", visible=False)
            ui_components_oload_audio = [active_filepath, go_to_config]
            main_audio.change(fn=on_file_load, inputs=[main_audio], outputs=ui_components_oload_audio)

            # Advance the walkthrough to the configuration step.
            go_to_config.click(lambda: gr.Walkthrough(selected=1), outputs=walkthrough)

        # === STEP 2: task configuration ===
        with gr.Step("Configuration", id=1) as config_step:
            gr.Markdown("### Step 2: Configure the Task")
            with gr.Group():
                with gr.Row():
                    task_type = gr.Radio(["Transcription", "Translation"], value="Transcription", label="Task Type")
                with gr.Row():
                    lang_source = gr.Dropdown(list(SUPPORTED_LANGS_MAP.keys()), value="French", label="Source Language")
                    # Target language only matters for translation; hidden otherwise.
                    lang_target = gr.Dropdown(list(SUPPORTED_LANGS_MAP.keys()), value="English", label="Target Language", visible=False)
            with gr.Accordion("Advanced Configuration", open=False):
                with gr.Group():
                    with gr.Row():
                        gr.Markdown("##### Chunks ")
                    with gr.Row():
                        left_context_secs = gr.Slider(value=20.0, label="left_context_secs",info="Streaming chunk duration in seconds (left context)", minimum=1.0, maximum=60.0, step=1.0, show_reset_button=False)
                        chunk_secs = gr.Slider(value=1.0, label="chunk_secs", info="Streaming chunk duration in seconds (chunk)", minimum=0.1, maximum=5.0, step=0.1, show_reset_button=False)
                        right_context_secs = gr.Slider(value=0.5, label="right_context_secs", info="Streaming chunk duration in seconds (right context)", minimum=0.1, maximum=10.0, step=0.1, show_reset_button=False)
                gr.Markdown("---")
                with gr.Group():
                    with gr.Row():
                        gr.Markdown("##### Decoding ")
                    with gr.Row():
                        streaming_policy = gr.Dropdown(["waitk", "alignatt"], value="alignatt", label="streaming_policy",  elem_classes="full-width",
                                                           info="“Wait-k: Higher accuracy, requires larger left context, higher latency” \n”AlignAtt: Lower latency, suitable for production, predicts multiple tokens per chunk”")

                    with gr.Row():
                        alignatt_thr = gr.Number(value=8, label="alignatt_thr", info="Cross-attention threshold for AlignAtt policy (default: 8), alignatt only",  precision=0)
                        waitk_lagging = gr.Number(value=2, label="waitk_lagging", info="Number of chunks to wait in the beginning (default: 2), works for both policies",  precision=0)
                    with gr.Row():
                        exclude_sink_frames = gr.Number(value=8, label="exclude_sink_frames", info="Number of frames to exclude from the xatt scores calculation (default: 8), alignatt only", precision=0)
                        xatt_scores_layer = gr.Number(value=-2, label="xatt_scores_layer", info="Layer to get cross-attention (xatt) scores from (default: -2), alignatt only", precision=0)
                    with gr.Row():
                        hallucinations_detector = gr.Checkbox(value=True, label="hallucinations_detector" , info="Detect hallucinations in the predicted tokens (default: True), works for both policies" )
            with gr.Row():
                auto_apply_presets = gr.Checkbox(value=True, label="Auto-apply presets for sample audios")
                reset_btn = gr.Button("Reset to defaults")
            with gr.Accordion("Configuration Summary", open=False):
                summary_box = gr.Textbox(lines=15, interactive=False,show_label=False)

            # --- Events ---
            # Show the target-language dropdown only for translation tasks.
            task_type.change(
                fn=lambda t: gr.update(visible=(t == "Translation")),
                inputs=task_type,
                outputs=lang_target,
                queue=False
            )

            # Any change to a configuration widget regenerates the summary text.
            inputs_list = [
                task_type, lang_source, lang_target,
                chunk_secs, left_context_secs, right_context_secs,
                streaming_policy, alignatt_thr, waitk_lagging,
                exclude_sink_frames, xatt_scores_layer, hallucinations_detector
            ]
            for inp in inputs_list:
                inp.change(
                    fn=summarize_config,
                    inputs=inputs_list,
                    outputs=summary_box,
                    queue=False
                )

            # When a sample audio is picked, optionally apply its preset config.
            main_audio.change(
                fn=apply_preset_if_example,
                inputs=[main_audio, auto_apply_presets],
                outputs=[
                    task_type, lang_source, lang_target,
                    chunk_secs, left_context_secs, right_context_secs,
                    streaming_policy, alignatt_thr, waitk_lagging,
                    exclude_sink_frames, xatt_scores_layer, hallucinations_detector,
                    summary_box
                ],
                queue=False
            )

            # Restore every configuration widget to its default value.
            reset_btn.click(
                fn=reset_to_defaults,
                inputs=None,
                outputs=[
                    task_type, lang_source, lang_target,
                    chunk_secs, left_context_secs, right_context_secs,
                    streaming_policy, alignatt_thr, waitk_lagging,
                    exclude_sink_frames, xatt_scores_layer, hallucinations_detector,
                    summary_box
                ],
                queue=False
            )

            go_to_task = gr.Button("Go to Task")
            go_to_task.click(lambda: gr.Walkthrough(selected=2), outputs=walkthrough)

        # === STEP 3: run the streaming task ===
        with gr.Step("Task", id=2) as task_step:
            with gr.Row():
                gr.Markdown("## Step 3: Start Stream Task")
            with gr.Group():
                with gr.Column():
                    # Receive-only WebRTC widget that plays back the server-side
                    # audio stream produced by read_and_stream_audio.
                    webrtc_stream = WebRTC(
                        label="Live Stream",
                        mode="receive",
                        modality="audio",
                        rtc_configuration=generate_coturn_config(),
                        visible=True,
                        inputs=main_audio,
                        icon= "https://cdn-icons-png.flaticon.com/128/18429/18429788.png",
                        pulse_color= "#df7a7a",
                        icon_radius= "10px",
                        icon_button_color= "rgb(255, 255, 255)",
                        height=150,
                        show_label=False
                    )

                    status_slider = gr.Slider(
                        0, 100,
                        value=0,
                        label="Streaming Progress %",
                        show_label=True,
                        interactive=False,
                        visible=False,
                        show_reset_button=False
                    )
            start_stream_button = gr.Button("▶️ Start Streaming", variant="primary")
            stop_stream_button = gr.Button("⏹️ Stop Streaming", visible=False,variant="stop")

            # Stream audio chunks from the active file into the WebRTC widget,
            # triggered by the start button.
            webrtc_stream.stream(
                fn=read_and_stream_audio,
                inputs=[active_filepath, session_hash_code,gr.State(READ_SIZE)],
                outputs=[webrtc_stream],
                trigger=start_stream_button.click,
                concurrency_id="audio_stream",
                concurrency_limit=10,
            )
            status_message_stream = gr.Markdown("", elem_id="status-message-stream", visible=False)

            with gr.Row():
                gr.Markdown("---")
            with gr.Row():
                gr.Markdown("##### Transcription / Translation Result")
            with gr.Row():
                task_output = gr.Textbox(
                    label="Transcription / Translation Result",
                    show_label=False,
                    lines=10,
                    max_lines= 10,
                    interactive=False,
                    visible=True,
                    autoscroll=True,
                    elem_id="task-output-box"
                    )
            with gr.Row():
                status_message_task = gr.Markdown("", elem_id="status-message-task",elem_classes=["info"], visible=False)
            with gr.Row():
                start_task_button = gr.Button("▶️ Start Task", visible=False, variant="primary")
                stop_task_button = gr.Button("⏹️ Stop Task", visible=False,variant="stop")

                stop_stream_button.click(
                        fn=stop_streaming,
                        inputs=[session_hash_code],
                    )

                stop_task_button.click(
                    fn=stop_task_fn,
                    inputs=session_hash_code,
                    outputs=task_output
                    )
                # Full set of inputs the ASR/AST task needs: session id plus
                # every configuration widget from Step 2.
                config_task_ui = [session_hash_code,task_type, lang_source, lang_target,
                        chunk_secs, left_context_secs, right_context_secs,
                        streaming_policy, alignatt_thr, waitk_lagging,
                        exclude_sink_frames, xatt_scores_layer, hallucinations_detector]

                start_task_button.click(
                        fn=start_task_asr_ast,
                        inputs=config_task_ui,
                        outputs=[task_output,status_message_task,start_task_button,stop_task_button,config_step]
                )

                # Components the streaming backend updates via AdditionalOutputs.
                ui_components = [
                    start_stream_button, stop_stream_button,start_task_button,
                    go_to_config, audio_source_step, status_slider,walkthrough,status_message_stream
                ]

                webrtc_stream.on_additional_outputs(
                    fn=handle_additional_outputs,
                    inputs=[webrtc_stream],
                    outputs=ui_components,
                    concurrency_id="additional_outputs_audio_stream",
                    concurrency_limit=10,
                )

if __name__ == "__main__":
    # Bound the request queue and keep the HTTP API surface closed,
    # then serve the app.
    app = demo.queue(max_size=10, api_open=False)
    app.launch(show_api=False, show_error=True, debug=DEBUG)