SakibAhmed committed
Commit c0b6368 · verified · 1 Parent(s): 590e0ea

Upload 8 files

Dockerfile ADDED
@@ -0,0 +1,20 @@
+ # Dockerfile
+ FROM python:3.11
+
+ WORKDIR /app
+
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY . .
+
+ RUN useradd -m -u 1000 user
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ EXPOSE 7860
+
+ # Start the Flask development server on port 7860 (app.py also starts its own instance on port 5001)
+ CMD python -m flask run --host=0.0.0.0 --port=7860 & python app.py
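A quick way to sanity-check the container once it is built and running is to call the /api/tts endpoint from Python. This is a minimal sketch rather than part of the commit: it assumes the `flask run` process started by the CMD above is reachable on localhost port 7860, that the `requests` package is installed on the host, and that the output filename (arbitrary) is writable.

import requests

# Hypothetical smoke test against a locally running container
# (port 7860 published, as exposed by the Dockerfile above).
url = "http://127.0.0.1:7860/api/tts"
params = {"text": "Container smoke test.", "voice": "en_GB-alba-medium"}

resp = requests.get(url, params=params, timeout=60)
resp.raise_for_status()
print(resp.headers.get("Content-Type"))  # expect audio/wav

with open("smoke_test.wav", "wb") as f:
    f.write(resp.content)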
README.md CHANGED
@@ -1,11 +1 @@
- ---
- title: Piper TTS API And Frontend
- emoji: 👀
- colorFrom: blue
- colorTo: gray
- sdk: docker
- pinned: false
- short_description: Piper-TTS-API-and-Frontend
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Piper-TTS-API-and-Frontend
app.py ADDED
@@ -0,0 +1,147 @@
+ import io
+ import logging
+ import wave
+ from pathlib import Path
+ import struct
+
+ from flask import Flask, Response, jsonify, render_template, request, send_file, stream_with_context
+ from flask_cors import CORS
+ from piper import PiperVoice
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ app = Flask(__name__)
+ CORS(app)  # Enable CORS for all routes.
+
+ # In-memory cache for PiperVoice instances
+ tts_instances = {}
+
+ # Directory where voice models are stored
+ VOICES_DIR = Path(__file__).parent / "voices"
+
+ def get_tts_instance(voice):
+     """
+     Retrieves a cached PiperVoice instance or creates a new one.
+     Loads the model and its required .onnx.json config file.
+     """
+     if voice not in tts_instances:
+         logger.info(f"Creating new PiperVoice instance for voice: {voice}")
+         try:
+             model_path, config_path = None, None
+             possible_paths = [
+                 VOICES_DIR / f"{voice}.onnx",
+                 Path(__file__).parent / f"{voice}.onnx",
+                 Path(f"{voice}.onnx"),
+             ]
+             for path in possible_paths:
+                 if path.exists():
+                     model_path = str(path)
+                     potential_config_path = path.with_suffix(".onnx.json")
+                     if potential_config_path.exists():
+                         config_path = str(potential_config_path)
+                         logger.info(f"Found model at: {model_path}")
+                         logger.info(f"Found config at: {config_path}")
+                         break
+
+             if not model_path or not config_path:
+                 logger.error(f"Voice model or config not found for '{voice}'. Ensure both '.onnx' and '.onnx.json' are present.")
+                 return None
+
+             tts_instances[voice] = PiperVoice.load(model_path, config_path=config_path)
+         except Exception as e:
+             logger.error(f"Failed to create PiperVoice instance for voice {voice}: {e}", exc_info=True)
+             return None
+     return tts_instances[voice]
+
+ @app.route('/')
+ def index():
+     """Serves the index.html frontend."""
+     return render_template('index.html')
+
+ @app.route('/api/tts', methods=['GET'])
+ def synthesize_audio_full():
+     """
+     Generates the full audio file and returns it.
+     """
+     text = request.args.get('text')
+     voice = request.args.get('voice', 'en_GB-alba-medium')
+
+     if not text:
+         return jsonify({"error": "Text to synthesize is required."}), 400
+
+     tts_instance = get_tts_instance(voice)
+     if not tts_instance:
+         return jsonify({"error": f"Could not load voice model for '{voice}'."}), 500
+
+     try:
+         wav_io = io.BytesIO()
+         with wave.open(wav_io, 'wb') as wav_file:
+             wav_file.setnchannels(1)
+             wav_file.setsampwidth(2)  # 16-bit
+             wav_file.setframerate(tts_instance.config.sample_rate)
+
+             # Each AudioChunk yielded by synthesize() exposes its raw
+             # 16-bit PCM samples via the audio_int16_bytes attribute.
+             for audio_chunk in tts_instance.synthesize(text):
+                 wav_file.writeframes(audio_chunk.audio_int16_bytes)
+
+         wav_io.seek(0)
+
+         return send_file(
+             wav_io,
+             mimetype='audio/wav',
+             as_attachment=True,
+             download_name='output.wav'
+         )
+     except Exception as e:
+         logger.error(f"Error during full synthesis: {e}", exc_info=True)
+         return jsonify({"error": f"Failed to synthesize audio: {str(e)}"}), 500
+
+ def generate_audio_stream(tts_instance, text):
+     """A generator function that streams the synthesized audio."""
+     try:
+         # 1. Create and yield the WAV header.
+         def create_wav_header(sample_rate, bits_per_sample=16, channels=1):
+             datasize = 0xFFFFFFFF - 36  # Near-maximum size for streaming; keeps both 32-bit size fields in range
+             o = [b'RIFF', struct.pack('<I', datasize + 36), b'WAVE', b'fmt ',
+                  struct.pack('<I', 16), struct.pack('<H', 1), struct.pack('<H', channels),
+                  struct.pack('<I', sample_rate),
+                  struct.pack('<I', sample_rate * channels * bits_per_sample // 8),
+                  struct.pack('<H', channels * bits_per_sample // 8),
+                  struct.pack('<H', bits_per_sample), b'data', struct.pack('<I', datasize)]
+             return b"".join(o)
+
+         header = create_wav_header(tts_instance.config.sample_rate)
+         yield header
+
+         # 2. Stream the raw PCM as it is synthesized:
+         # yield the bytes from each chunk's audio_int16_bytes attribute.
+         for audio_chunk in tts_instance.synthesize(text):
+             yield audio_chunk.audio_int16_bytes
+
+     except Exception as e:
+         logger.error(f"Error during stream generation: {e}", exc_info=True)
+
+
+ @app.route('/api/tts-stream', methods=['GET'])
+ def synthesize_audio_stream():
+     """
+     Streams the synthesized audio back to the client as it's generated.
+     """
+     text = request.args.get('text')
+     voice = request.args.get('voice', 'en_GB-alba-medium')
+
+     if not text:
+         return jsonify({"error": "Text to synthesize is required."}), 400
+
+     tts_instance = get_tts_instance(voice)
+     if not tts_instance:
+         return jsonify({"error": f"Could not load voice model for '{voice}'."}), 500
+
+     stream_generator = generate_audio_stream(tts_instance, text)
+     return Response(stream_with_context(stream_generator), mimetype='audio/wav')
+
+ if __name__ == '__main__':
+     app.run(debug=True, port=5001)
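Both endpoints can be exercised from Python as well as from the bundled frontend. The following is a minimal sketch, assuming the app is running locally on port 5001 (as in the `__main__` block above), that the `requests` package is installed, and that the voice name matches the model shipped in this commit; the output filenames are arbitrary.

import requests

BASE = "http://127.0.0.1:5001"
params = {"text": "Hello, this is a test.", "voice": "en_GB-alba-medium"}

# /api/tts builds the complete WAV in memory on the server, then returns it.
resp = requests.get(f"{BASE}/api/tts", params=params)
resp.raise_for_status()
with open("output_full.wav", "wb") as f:
    f.write(resp.content)

# /api/tts-stream sends the WAV header first, then PCM chunks as they are synthesized.
with requests.get(f"{BASE}/api/tts-stream", params=params, stream=True) as stream_resp:
    stream_resp.raise_for_status()
    with open("output_stream.wav", "wb") as f:
        for chunk in stream_resp.iter_content(chunk_size=4096):
            f.write(chunk)

The streaming variant is what lets a client start playback before synthesis finishes; the full variant is simpler when the whole file is needed anyway.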
postman.json ADDED
@@ -0,0 +1,42 @@
+ {
+     "info": {
+         "_postman_id": "YOUR_COLLECTION_ID",
+         "name": "NOW Piper TTS API",
+         "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
+     },
+     "item": [
+         {
+             "name": "Synthesize Audio",
+             "request": {
+                 "method": "GET",
+                 "header": [],
+                 "url": {
+                     "raw": "http://127.0.0.1:5001/api/tts?text=Hello%2C%20this%20is%20a%20test.&voice=en_US-lessac-medium",
+                     "protocol": "http",
+                     "host": [
+                         "127",
+                         "0",
+                         "0",
+                         "1"
+                     ],
+                     "port": "5001",
+                     "path": [
+                         "api",
+                         "tts"
+                     ],
+                     "query": [
+                         {
+                             "key": "text",
+                             "value": "Hello, this is a test."
+                         },
+                         {
+                             "key": "voice",
+                             "value": "en_US-lessac-medium"
+                         }
+                     ]
+                 }
+             },
+             "response": []
+         }
+     ]
+ }
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ Flask==3.1.2
+ Flask_Cors==5.0.0
+ piper_tts==1.3.0
templates/index.html ADDED
@@ -0,0 +1,62 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+     <title>Piper TTS</title>
+     <style>
+         body { font-family: sans-serif; margin: 2em; }
+         textarea { width: 100%; height: 100px; }
+         button { margin-top: 1em; }
+         audio { margin-top: 1em; }
+     </style>
+ </head>
+ <body>
+     <h1>Piper Text-to-Speech</h1>
+     <textarea id="text-input" placeholder="Enter text to synthesize..."></textarea>
+     <br>
+     <label for="voice-select">Select Voice:</label>
+     <select id="voice-select">
+         <option value="en_US-lessac-medium">English (US) - Lessac Medium</option>
+         <!-- Add more voice options here as you download models -->
+     </select>
+     <br>
+     <button id="synthesize-button">Synthesize</button>
+     <br>
+     <audio id="audio-player" controls></audio>
+
+     <script>
+         const textInput = document.getElementById('text-input');
+         const voiceSelect = document.getElementById('voice-select');
+         const synthesizeButton = document.getElementById('synthesize-button');
+         const audioPlayer = document.getElementById('audio-player');
+
+         synthesizeButton.addEventListener('click', async () => {
+             const text = textInput.value;
+             const voice = voiceSelect.value;
+
+             if (!text) {
+                 alert('Please enter some text to synthesize.');
+                 return;
+             }
+
+             try {
+                 const response = await fetch(`/api/tts?text=${encodeURIComponent(text)}&voice=${encodeURIComponent(voice)}`);
+
+                 if (response.ok) {
+                     const blob = await response.blob();
+                     const url = URL.createObjectURL(blob);
+                     audioPlayer.src = url;
+                     audioPlayer.play();
+                 } else {
+                     const error = await response.json();
+                     alert(`Error: ${error.error}`);
+                 }
+             } catch (error) {
+                 console.error('Error fetching audio:', error);
+                 alert('An unexpected error occurred.');
+             }
+         });
+     </script>
+ </body>
+ </html>
voices/en_GB-alba-medium.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:401369c4a81d09fdd86c32c5c864440811dbdcc66466cde2d64f7133a66ad03b
+ size 63201294
voices/en_GB-alba-medium.onnx.json ADDED
@@ -0,0 +1,493 @@
+ {
+     "audio": {
+         "sample_rate": 22050,
+         "quality": "medium"
+     },
+     "espeak": {
+         "voice": "en-gb-x-rp"
+     },
+     "inference": {
+         "noise_scale": 0.667,
+         "length_scale": 1,
+         "noise_w": 0.8
+     },
+     "phoneme_type": "espeak",
+     "phoneme_map": {},
+     "phoneme_id_map": {
+         "_": [0],
+         "^": [1],
+         "$": [2],
+         " ": [3],
+         "!": [4],
+         "'": [5],
+         "(": [6],
+         ")": [7],
+         ",": [8],
+         "-": [9],
+         ".": [10],
+         ":": [11],
+         ";": [12],
+         "?": [13],
+         "a": [14],
+         "b": [15],
+         "c": [16],
+         "d": [17],
+         "e": [18],
+         "f": [19],
+         "h": [20],
+         "i": [21],
+         "j": [22],
+         "k": [23],
+         "l": [24],
+         "m": [25],
+         "n": [26],
+         "o": [27],
+         "p": [28],
+         "q": [29],
+         "r": [30],
+         "s": [31],
+         "t": [32],
+         "u": [33],
+         "v": [34],
+         "w": [35],
+         "x": [36],
+         "y": [37],
+         "z": [38],
+         "æ": [39],
+         "ç": [40],
+         "ð": [41],
+         "ø": [42],
+         "ħ": [43],
+         "ŋ": [44],
+         "œ": [45],
+         "ǀ": [46],
+         "ǁ": [47],
+         "ǂ": [48],
+         "ǃ": [49],
+         "ɐ": [50],
+         "ɑ": [51],
+         "ɒ": [52],
+         "ɓ": [53],
+         "ɔ": [54],
+         "ɕ": [55],
+         "ɖ": [56],
+         "ɗ": [57],
+         "ɘ": [58],
+         "ə": [59],
+         "ɚ": [60],
+         "ɛ": [61],
+         "ɜ": [62],
+         "ɞ": [63],
+         "ɟ": [64],
+         "ɠ": [65],
+         "ɡ": [66],
+         "ɢ": [67],
+         "ɣ": [68],
+         "ɤ": [69],
+         "ɥ": [70],
+         "ɦ": [71],
+         "ɧ": [72],
+         "ɨ": [73],
+         "ɪ": [74],
+         "ɫ": [75],
+         "ɬ": [76],
+         "ɭ": [77],
+         "ɮ": [78],
+         "ɯ": [79],
+         "ɰ": [80],
+         "ɱ": [81],
+         "ɲ": [82],
+         "ɳ": [83],
+         "ɴ": [84],
+         "ɵ": [85],
+         "ɶ": [86],
+         "ɸ": [87],
+         "ɹ": [88],
+         "ɺ": [89],
+         "ɻ": [90],
+         "ɽ": [91],
+         "ɾ": [92],
+         "ʀ": [93],
+         "ʁ": [94],
+         "ʂ": [95],
+         "ʃ": [96],
+         "ʄ": [97],
+         "ʈ": [98],
+         "ʉ": [99],
+         "ʊ": [100],
+         "ʋ": [101],
+         "ʌ": [102],
+         "ʍ": [103],
+         "ʎ": [104],
+         "ʏ": [105],
+         "ʐ": [106],
+         "ʑ": [107],
+         "ʒ": [108],
+         "ʔ": [109],
+         "ʕ": [110],
+         "ʘ": [111],
+         "ʙ": [112],
+         "ʛ": [113],
+         "ʜ": [114],
+         "ʝ": [115],
+         "ʟ": [116],
+         "ʡ": [117],
+         "ʢ": [118],
+         "ʲ": [119],
+         "ˈ": [120],
+         "ˌ": [121],
+         "ː": [122],
+         "ˑ": [123],
+         "˞": [124],
+         "β": [125],
+         "θ": [126],
+         "χ": [127],
+         "ᵻ": [128],
+         "ⱱ": [129],
+         "0": [130],
+         "1": [131],
+         "2": [132],
+         "3": [133],
+         "4": [134],
+         "5": [135],
+         "6": [136],
+         "7": [137],
+         "8": [138],
+         "9": [139],
+         "̧": [140],
+         "̃": [141],
+         "̪": [142],
+         "̯": [143],
+         "̩": [144],
+         "ʰ": [145],
+         "ˤ": [146],
+         "ε": [147],
+         "↓": [148],
+         "#": [149],
+         "\"": [150],
+         "↑": [151],
+         "̺": [152],
+         "̻": [153]
+     },
+     "num_symbols": 256,
+     "num_speakers": 1,
+     "speaker_id_map": {},
+     "piper_version": "1.0.0",
+     "language": {
+         "code": "en_GB",
+         "family": "en",
+         "region": "GB",
+         "name_native": "English",
+         "name_english": "English",
+         "country_english": "Great Britain"
+     },
+     "dataset": "alba"
+ }
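The fields in this config are what app.py relies on at runtime: PiperVoice.load reads it alongside the .onnx model, and tts_instance.config.sample_rate (22050 here) sets the frame rate of the WAV output. A minimal sketch for inspecting the file directly with the standard library, assuming it is run from the repository root:

import json
from pathlib import Path

config = json.loads(Path("voices/en_GB-alba-medium.onnx.json").read_text(encoding="utf-8"))

print(config["audio"]["sample_rate"])   # 22050, the WAV frame rate used in app.py
print(config["espeak"]["voice"])        # en-gb-x-rp, the espeak voice used for phonemization
print(config["num_speakers"])           # 1, a single-speaker model
print(len(config["phoneme_id_map"]))    # number of phoneme symbols mapped to model input ids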