root
committed on
Commit
·
87ee19d
1
Parent(s):
39ebd66
update
Browse files- README.md +1 -2
- README_zh.md +1 -2
- generate_config.py +60 -0
README.md
CHANGED
|
@@ -43,7 +43,7 @@ This dataset is meticulously curated from five renowned open-source emotional di
|
|
| 43 |
The dataset is organized as follows:
|
| 44 |
|
| 45 |
```
|
| 46 |
-
ExpressiveSpeech
|
| 47 |
├── audio/
|
| 48 |
│ ├── M3ED
|
| 49 |
│ │ ├── audio_00001.wav
|
|
@@ -55,7 +55,6 @@ ExpressiveSpeech.tar.gz/
|
|
| 55 |
└── metadata.jsonl
|
| 56 |
```
|
| 57 |
|
| 58 |
-
- **`ExpressiveSpeech.tar.gz`**: This package contains all the audio files in `.wav` format (16kHz, 16-bit, mono).
|
| 59 |
- **`metadata.jsonl`**: A jsonl file containing detailed information for each utterance. The metadata includes:
|
| 60 |
- `audio_path`: The relative path to the audio file.
|
| 61 |
- `value`: The ASR-generated text transcription.
|
|
|
|
| 43 |
The dataset is organized as follows:
|
| 44 |
|
| 45 |
```
|
| 46 |
+
ExpressiveSpeech/
|
| 47 |
├── audio/
|
| 48 |
│ ├── M3ED
|
| 49 |
│ │ ├── audio_00001.wav
|
|
|
|
| 55 |
└── metadata.jsonl
|
| 56 |
```
|
| 57 |
|
|
|
|
| 58 |
- **`metadata.jsonl`**: A jsonl file containing detailed information for each utterance. The metadata includes:
|
| 59 |
- `audio_path`: The relative path to the audio file.
|
| 60 |
- `value`: The ASR-generated text transcription.
|
README_zh.md
CHANGED
|
@@ -35,7 +35,7 @@
|
|
| 35 |
数据集的组织结构如下:
|
| 36 |
|
| 37 |
```
|
| 38 |
-
ExpressiveSpeech
|
| 39 |
├── audio/
|
| 40 |
│ ├── M3ED
|
| 41 |
│ │ ├── audio_00001.wav
|
|
@@ -47,7 +47,6 @@ ExpressiveSpeech.tar.gz/
|
|
| 47 |
└── metadata.jsonl
|
| 48 |
```
|
| 49 |
|
| 50 |
-
- **`ExpressiveSpeech.tar.gz`**: 该压缩包包含所有 `.wav` 格式的音频文件(16kHz, 16-bit, 单声道)。
|
| 51 |
- **`metadata.jsonl`**: 一个 `jsonl` 文件,其中包含每条语音的详细信息。元数据包括:
|
| 52 |
- `audio_path`: 音频文件的相对路径。
|
| 53 |
- `value`: 由 ASR 生成的文本转录。
|
|
|
|
| 35 |
数据集的组织结构如下:
|
| 36 |
|
| 37 |
```
|
| 38 |
+
ExpressiveSpeech/
|
| 39 |
├── audio/
|
| 40 |
│ ├── M3ED
|
| 41 |
│ │ ├── audio_00001.wav
|
|
|
|
| 47 |
└── metadata.jsonl
|
| 48 |
```
|
| 49 |
|
|
|
|
| 50 |
- **`metadata.jsonl`**: 一个 `jsonl` 文件,其中包含每条语音的详细信息。元数据包括:
|
| 51 |
- `audio_path`: 音频文件的相对路径。
|
| 52 |
- `value`: 由 ASR 生成的文本转录。
|
generate_config.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Script: flatten the nested ExpressiveSpeech metadata and push the result to
# the Hugging Face Hub.
# NOTE(review): requires the third-party `datasets` package to be installed.
from datasets import load_dataset, Audio

# 1. Log in (if required)
# ...

# 2. Load the original, nested dataset in JSON Lines format
print("正在以 JSON Lines 格式加载 metadata.jsonl...")
# `metadata.jsonl` is read from the current working directory; each record is
# expected to carry a nested 'conversations' list (consumed by the flatten
# step below) — TODO confirm against the actual metadata schema.
nested_dataset = load_dataset('json', data_files='metadata.jsonl', split='train')

# 3. Flatten the dataset with .map() (compatible with recent `datasets` versions)
print("正在扁平化数据集结构 (使用 .map)...")
def flatten_conversations_batch(batch):
    """Flatten a batched 'conversations' column into per-utterance columns.

    `batch` maps column names to lists, e.g.:
        {'conversations': [[{'No': 1, ...}, {'No': 2, ...}], [{'No': 3, ...}]]}

    Returns a dict of flat, equal-length lists — one entry per utterance —
    for the fixed set of metadata columns below. Fields missing from an
    utterance dict come back as ``None`` (via ``dict.get``).
    """
    # Fixed output schema; insertion order is preserved in the result dict.
    columns = (
        'No',
        'from',
        'value',
        'emotion',
        'length',
        'score_arousal',
        'score_prosody',
        'score_nature',
        'score_expressive',
        'audio-path',
    )
    flattened = {name: [] for name in columns}

    # Walk every utterance of every conversation in the batch, appending its
    # field values to the corresponding flat column.
    for conversation in batch['conversations']:
        for utterance in conversation:
            for name in columns:
                flattened[name].append(utterance.get(name))

    return flattened
# Apply the flattening function via .map()
# batched=True feeds the function whole batches at once, which is faster
# remove_columns drops the original nested 'conversations' column
dataset = nested_dataset.map(
    flatten_conversations_batch,
    batched=True,
    remove_columns=nested_dataset.column_names
)

print("数据集已成功扁平化,现在的列为:", dataset.column_names)

# 4. Define column data types
print("正在转换音频列...")
# Make sure the audio file paths in 'audio-path' are correct relative to the
# repository root — cast_column(Audio()) resolves and decodes those paths.
dataset = dataset.cast_column("audio-path", Audio())

# 5. Push the new configuration and the optimized metadata to the Hub
print("正在将新的配置和元数据上传到Hub...")
# NOTE(review): network side effect — presumably requires a logged-in HF token
# with write access to this repo; verify before running.
dataset.push_to_hub("FreedomIntelligence/ExpressiveSpeech")

print("操作完成!请刷新您在Hugging Face上的数据集页面。")