**SimbaBench dataset** — Format: parquet · Size: 100K–1M samples · Tags: automatic-speech-recognition, text-to-speech, spoken-language-identification, speech, audio, african-languages · License: cc-by-4.0 (see metadata below).
configs:
- config_name: asr_train_dev
data_files:
- split: train
path: asr_train_dev/HF_train*
- split: validation
path: asr_train_dev/HF_dev*
- config_name: slid_train_dev
data_files:
- split: train
path: slid_train_dev/HF_train*
- split: validation
path: slid_train_dev/HF_dev*
- config_name: tts_train_dev
data_files:
- split: train
path: tts_train_dev/HF_train*
- split: validation
path: tts_train_dev/HF_dev*
- config_name: tts_test_ewe
data_files:
- split: test
path: tts_test/HF_test-ewe*
- config_name: tts_test_kin
data_files:
- split: test
path: tts_test/HF_test-kin*
- config_name: tts_test_Asante-twi
data_files:
- split: test
path: tts_test/HF_test-Asante-twi*
- config_name: tts_test_yor
data_files:
- split: test
path: tts_test/HF_test-yor*
- config_name: tts_test_wol
data_files:
- split: test
path: tts_test/HF_test-wol*
- config_name: tts_test_hau
data_files:
- split: test
path: tts_test/HF_test-hau*
- config_name: tts_test_lin
data_files:
- split: test
path: tts_test/HF_test-lin*
- config_name: tts_test_xho
data_files:
- split: test
path: tts_test/HF_test-xho*
- config_name: tts_test_tsn
data_files:
- split: test
path: tts_test/HF_test-tsn*
- config_name: tts_test_afr
data_files:
- split: test
path: tts_test/HF_test-afr*
- config_name: tts_test_sot
data_files:
- split: test
path: tts_test/HF_test-sot*
- config_name: tts_test_Akuapim-twi
data_files:
- split: test
path: tts_test/HF_test-Akuapim-twi*
- config_name: slid_61_test
data_files:
- split: test
path: slid_test/HF_merged_slid_test_61*
- config_name: asr_test_Akuapim-twi
data_files:
- split: test
path: asr_test/HF_test-Akuapim-twi*
- config_name: asr_test_Asante-twi
data_files:
- split: test
path: asr_test/HF_test-Asante-twi*
- config_name: asr_test_afr
data_files:
- split: test
path: asr_test/HF_test-afr*
- config_name: asr_test_amh
data_files:
- split: test
path: asr_test/HF_test-amh*
- config_name: asr_test_bas
data_files:
- split: test
path: asr_test/HF_test-bas*
- config_name: asr_test_bem
data_files:
- split: test
path: asr_test/HF_test-bem*
- config_name: asr_test_dav
data_files:
- split: test
path: asr_test/HF_test-dav*
- config_name: asr_test_dyu
data_files:
- split: test
path: asr_test/HF_test-dyu*
- config_name: asr_test_fat
data_files:
- split: test
path: asr_test/HF_test-fat*
- config_name: asr_test_fon
data_files:
- split: test
path: asr_test/HF_test-fon*
- config_name: asr_test_fuc
data_files:
- split: test
path: asr_test/HF_test-fuc*
- config_name: asr_test_fuf
data_files:
- split: test
path: asr_test/HF_test-fuf*
- config_name: asr_test_gaa
data_files:
- split: test
path: asr_test/HF_test-gaa*
- config_name: asr_test_hau
data_files:
- split: test
path: asr_test/HF_test-hau*
- config_name: asr_test_ibo
data_files:
- split: test
path: asr_test/HF_test-ibo*
- config_name: asr_test_kab
data_files:
- split: test
path: asr_test/HF_test-kab*
- config_name: asr_test_kin
data_files:
- split: test
path: asr_test/HF_test-kin*
- config_name: asr_test_kln
data_files:
- split: test
path: asr_test/HF_test-kln*
- config_name: asr_test_loz
data_files:
- split: test
path: asr_test/HF_test-loz*
- config_name: asr_test_lug
data_files:
- split: test
path: asr_test/HF_test-lug*
- config_name: asr_test_luo
data_files:
- split: test
path: asr_test/HF_test-luo*
- config_name: asr_test_mlq
data_files:
- split: test
path: asr_test/HF_test-mlq*
- config_name: asr_test_nbl
data_files:
- split: test
path: asr_test/HF_test-nbl*
- config_name: asr_test_nso
data_files:
- split: test
path: asr_test/HF_test-nso*
- config_name: asr_test_nya
data_files:
- split: test
path: asr_test/HF_test-nya*
- config_name: asr_test_sot
data_files:
- split: test
path: asr_test/HF_test-sot*
- config_name: asr_test_srr
data_files:
- split: test
path: asr_test/HF_test-srr*
- config_name: asr_test_ssw
data_files:
- split: test
path: asr_test/HF_test-ssw*
- config_name: asr_test_sus
data_files:
- split: test
path: asr_test/HF_test-sus*
- config_name: asr_test_swa
data_files:
- split: test
path: asr_test/HF_test-sw* # NOTE(review): every other ASR config uses the full ISO-639-3 code (e.g. HF_test-swa*) — confirm the on-disk file names for Swahili
- config_name: asr_test_tig
data_files:
- split: test
path: asr_test/HF_test-tig*
- config_name: asr_test_tir
data_files:
- split: test
path: asr_test/HF_test-tir*
- config_name: asr_test_toi
data_files:
- split: test
path: asr_test/HF_test-toi*
- config_name: asr_test_tsn
data_files:
- split: test
path: asr_test/HF_test-tsn*
- config_name: asr_test_tso
data_files:
- split: test
path: asr_test/HF_test-tso*
- config_name: asr_test_twi
data_files:
- split: test
path: asr_test/HF_test-twi*
- config_name: asr_test_ven
data_files:
- split: test
path: asr_test/HF_test-ven*
- config_name: asr_test_wol
data_files:
- split: test
path: asr_test/HF_test-wol*
- config_name: asr_test_xho
data_files:
- split: test
path: asr_test/HF_test-xho*
- config_name: asr_test_yor
data_files:
- split: test
path: asr_test/HF_test-yor*
- config_name: asr_test_zgh
data_files:
- split: test
path: asr_test/HF_test-zgh*
- config_name: asr_test_zul
data_files:
- split: test
path: asr_test/HF_test-zul*
language:
- afr
- amh
- bas
- bem
- dyu
- ee
- fat
- fon
- fra
- fuc
- fuf
- gaa
- hau
- ibo
- kab
- kin
- kln
- kon
- lin
- loz
- lug
- luo
- mlq
- nbl
- nso
- nya
- orm
- por
- sna
- som
- sot
- ssw
- swa
- tir
- tig
- toi
- tsn
- tso
- twi
- ven
- wol
- xho
- yor
- zul
- zgh
- aka
- mos
- umb
- din
- sag
- mlg
license: cc-by-4.0
tags:
- automatic-speech-recognition
- text-to-speech
- spoken-language-identification
- speech
- audio
- african-languages
- multilingual
- low-resource
- benchmark
- simbabench
- simba
task_categories:
- automatic-speech-recognition
- text-to-speech
- audio-classification
models:
- UBC-NLP/Simba-S
- UBC-NLP/Simba-M
- UBC-NLP/Simba-H
- UBC-NLP/Simba-W
- UBC-NLP/Simba-X
- UBC-NLP/Simba-TTS-twi-asanti
- UBC-NLP/Simba-TTS-lin
- UBC-NLP/Simba-TTS-sot
- UBC-NLP/Simba-TTS-tsn
- UBC-NLP/Simba-TTS-xho
- UBC-NLP/Simba-TTS-twi-akuapem
- UBC-NLP/Simba-TTS-afr
- UBC-NLP/Simba-SLID-49
datasets:
- UBC-NLP/SimbaBench
# SimbaBench Data Release & Benchmarking
To evaluate your model on SimbaBench across all supported tasks (ASR, TTS, and SLID), simply load the corresponding configuration for the task and language you wish to benchmark.
Each task is organized by configuration name (e.g., asr_test_afr, tts_test_wol, slid_61_test). Loading a configuration provides the standardized evaluation split for that specific benchmark.
Example:

```python
from datasets import load_dataset

data = load_dataset("UBC-NLP/SimbaBench_dataset", "asr_test_afr")
```

```text
DatasetDict({
    test: Dataset({
        features: ['split', 'benchmark_id', 'audio', 'text', 'duration_s', 'lang_iso3', 'lang_name'],
        num_rows: 1000
    })
})
```

```python
data['test'][0]
```

```text
{'split': 'test',
 'benchmark_id': 'afr_Lwazi_afr_test_idx3889',
 'audio': {'path': None,
  'array': array([ 4.27246094e-04,  7.62939453e-04,  6.71386719e-04, ...,
         -3.05175781e-04, -2.13623047e-04, -6.10351562e-05]),
  'sampling_rate': 16000},
 'text': 'watter, verontwaardiging sou daar, in ons binneste gewees het?',
 'duration_s': 5.119999885559082,
 'lang_iso3': 'afr',
 'lang_name': 'Afrikaans'}
```
## 📌 ASR Evaluation Configurations
| Config Name | Language | ISO | # Samples | # Hours |
|---|---|---|---|---|
| asr_test_Akuapim-twi | Akuapim-twi | Akuapim-twi | 1,000 | 1.35 |
| asr_test_Asante-twi | Asante-twi | Asante-twi | 1,000 | 0.97 |
| asr_test_afr | Afrikaans | afr | 1,000 | 0.87 |
| asr_test_amh | Amharic | amh | 581 | 1.12 |
| asr_test_bas | Basaa | bas | 582 | 0.76 |
| asr_test_bem | Bemba | bem | 1,000 | 2.15 |
| asr_test_dav | Taita | dav | 878 | 1.17 |
| asr_test_dyu | Dyula | dyu | 59 | 0.10 |
| asr_test_fat | Fanti | fat | 1,000 | 1.38 |
| asr_test_fon | Fon | fon | 1,000 | 0.66 |
| asr_test_fuc | Pulaar | fuc | 100 | 0.10 |
| asr_test_fuf | Pular | fuf | 129 | 0.03 |
| asr_test_gaa | Ga | gaa | 1,000 | 1.52 |
| asr_test_hau | Hausa | hau | 681 | 0.89 |
| asr_test_ibo | Igbo | ibo | 5 | 0.01 |
| asr_test_kab | Kabyle | kab | 1,000 | 1.05 |
| asr_test_kin | Kinyarwanda | kin | 1,000 | 1.50 |
| asr_test_kln | Kalenjin | kln | 1,000 | 1.50 |
| asr_test_loz | Lozi | loz | 399 | 0.91 |
| asr_test_lug | Ganda | lug | 1,000 | 1.65 |
| asr_test_luo | Luo (Kenya and Tanzania) | luo | 1,000 | 1.31 |
| asr_test_mlq | Western Maninkakan | mlq | 182 | 0.04 |
| asr_test_nbl | South Ndebele | nbl | 1,000 | 1.12 |
| asr_test_nso | Northern Sotho | nso | 1,000 | 0.88 |
| asr_test_nya | Nyanja | nya | 428 | 1.31 |
| asr_test_sot | Southern Sotho | sot | 1,000 | 0.82 |
| asr_test_srr | Serer | srr | 899 | 2.84 |
| asr_test_ssw | Swati | ssw | 1,000 | 0.93 |
| asr_test_sus | Susu | sus | 210 | 0.05 |
| asr_test_swa | Swahili | swa | 1,000 | 1.23 |
| asr_test_tig | Tigre | tig | 185 | 0.33 |
| asr_test_tir | Tigrinya | tir | 7 | 0.01 |
| asr_test_toi | Tonga (Zambia) | toi | 463 | 1.47 |
| asr_test_tsn | Tswana | tsn | 1,000 | 0.82 |
| asr_test_tso | Tsonga | tso | 1,000 | 0.99 |
| asr_test_twi | Twi | twi | 12 | 0.02 |
| asr_test_ven | Venda | ven | 1,000 | 0.92 |
| asr_test_wol | Wolof | wol | 1,000 | 1.19 |
| asr_test_xho | Xhosa | xho | 1,000 | 0.92 |
| asr_test_yor | Yoruba | yor | 359 | 0.42 |
| asr_test_zgh | Standard Moroccan Tamazight | zgh | 197 | 0.22 |
| asr_test_zul | Zulu | zul | 1,000 | 1.10 |
## 📌 TTS Evaluation Configurations
| Config Name | Language | ISO | # Samples | # Hours |
|---|---|---|---|---|
| tts_test_ewe | Ewe | ewe | 66 | 0.29 |
| tts_test_kin | Kinyarwanda | kin | 1,053 | 1.30 |
| tts_test_Asante-twi | Asante-twi | Asante-twi | 64 | 0.18 |
| tts_test_yor | Yoruba | yor | 40 | 0.13 |
| tts_test_wol | Wolof | wol | 4,001 | 4.12 |
| tts_test_hau | Hausa | hau | 124 | 0.24 |
| tts_test_lin | Lingala | lin | 63 | 0.28 |
| tts_test_xho | Xhosa | xho | 242 | 0.31 |
| tts_test_tsn | Tswana | tsn | 238 | 0.36 |
| tts_test_afr | Afrikaans | afr | 293 | 0.34 |
| tts_test_sot | Southern Sotho | sot | 210 | 0.33 |
| tts_test_Akuapim-twi | Akuapim-twi | Akuapim-twi | 83 | 0.22 |
## 📌 SLID Evaluation
| Config Name | Language Scope | # Samples | # Hours |
|---|---|---|---|
| slid_61_test | 61 Languages | 21,817 | 34.36 |
## 📌 Training Data
| Config Name | Language | # Samples (Train / Dev) | # Hours (Train / Dev) |
|---|---|---|---|
| asr_train_dev | 42 languages | 218,515 / 7,500 | 204.51 / 10.49 |
| slid_train_dev | 49 languages | 355,337 / 8,529 | 330.50 / 13.49 |
| tts_train_dev | 7 languages | 81,729 / 1,347 | 140.51 / 3.49 |
| **TOTAL** | — | 655,581 / 17,376 | 675.52 / 27.47 |
## 📖 Citation
If you use the SimbaBench dataset for your scientific publication, or if you find the resources in this repository useful, please cite our paper and the original dataset papers.
### 📄 SimbaBench Paper
@inproceedings{elmadany-etal-2025-voice,
title = "Voice of a Continent: Mapping {A}frica{'}s Speech Technology Frontier",
author = "Elmadany, AbdelRahim A. and
Kwon, Sang Yun and
Toyin, Hawau Olamide and
Alcoba Inciarte, Alcides and
Aldarmaki, Hanan and
Abdul-Mageed, Muhammad",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.559/",
doi = "10.18653/v1/2025.emnlp-main.559",
pages = "11039--11061",
ISBN = "979-8-89176-332-6",
}
### 📄 Original ASR Datasets Citation
Besacier & Gauthier, 2023 — Alffa Public
@misc{besacier-gauthier-2023-alffa,
author = {Besacier, Laurent and Gauthier, Elodie},
title = {{ALFFA\_PUBLIC}: {A}frican Languages Factored Lattices
for Automatic Speech Recognition},
year = {2023},
howpublished = {\url{https://github.com/getalp/ALFFA_PUBLIC}},
}
Sikasote & Anastasopoulos, 2022 — BembaSpeech
@inproceedings{sikasote-anastasopoulos-2022-bembaspeech,
author = {Sikasote, Claytone and Anastasopoulos, Antonios},
title = {{BembaSpeech}: A Speech Recognition Corpus for the
Bemba Language},
booktitle = {Proceedings of the Language Resources and Evaluation
Conference},
pages = {7277--7283},
year = {2022},
address = {Marseille, France},
publisher = {European Language Resources Association},
}
Mozilla Foundation, 2023 — Common Voice (CV-19)
@misc{mozilla-2023-commonvoice,
author = {{Mozilla Foundation}},
title = {Mozilla Common Voice: A Massively Multilingual Open
Dataset for Voice Technologies},
year = {2023},
howpublished = {\url{https://commonvoice.mozilla.org}},
}
Asamoah Owusu et al., 2022 — Financial Speech
@misc{asamoahowusu-etal-2022-financialspeech,
author = {{Asamoah Owusu}, D. and Korsah, A. and Quartey, B.
and {Nwolley Jnr.}, S. and Sampah, D.
and Adjepon-Yamoah, D. and {Omane Boateng}, L.},
title = {Financial Inclusion Speech Dataset},
year = {2022},
howpublished = {\url{https://github.com/Ashesi-Org/Financial-Inclusion-Speech-Dataset}},
note = {Created by Ashesi University and Nokwary Technologies
with funding from Lacuna Fund},
}
Gauthier et al., 2024 — Kallaama
@inproceedings{gauthier-etal-2024-kallaama,
author = {Gauthier, Elodie and Ndiaye, Aminata and Guissé, Abdoulaye},
title = {Kallaama: A Transcribed Speech Dataset about Agriculture
in the Three Most Widely Spoken Languages in {S}enegal},
booktitle = {Proceedings of the Fifth Workshop on Resources for
African Indigenous Languages ({RAIL}) @ {LREC-COLING} 2024},
year = {2024},
address = {Lannion, France; Dakar and Thiès, Sénégal},
}
Van Heerden et al., 2016 — Lwazi
@inproceedings{vanheerden-etal-2016-lwazi,
author = {Van Heerden, Charl and Kleynhans, Neil and Davel, Marelie H.},
title = {Improving the {Lwazi} {ASR} Baseline},
booktitle = {Proceedings of Interspeech 2016},
year = {2016},
}
NaijaVoices, 2024 — Naija Voices
@misc{naijavoices-2024,
author = {{NaijaVoices}},
title = {{NaijaVoices} Dataset: A Multilingual Speech Corpus
for {N}igerian Languages},
year = {2024},
howpublished = {\url{https://naijavoices.com/}},
}
Barnard et al., 2014 — NCHLT + AUX1/2
@inproceedings{barnard-etal-2014-nchlt,
author = {Barnard, Etienne and Davel, Marelie H. and
Van Heerden, Charl and De Wet, Febe and Badenhorst, Jaco},
title = {The {NCHLT} Speech Corpus of the {S}outh {A}frican Languages},
booktitle = {Proceedings of the 2014 Spoken Language Technologies for
Under-resourced Languages ({SLTU}) Workshop},
pages = {194--200},
year = {2014},
address = {St. Petersburg, Russia},
}
Doumbouya et al., 2021 — Nicolingua (0003 & 0004)
@inproceedings{doumbouya-etal-2021-nicolingua,
author = {Doumbouya, Moussa and Einstein, Lisa and Piech, Chris},
title = {Using Radio Archives for Low-Resource Speech Recognition:
Towards an Intelligent Virtual Assistant for Illiterate Users},
booktitle = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
volume = {35},
year = {2021},
}
Gutkin et al., 2020 — YorubaVoice
@inproceedings{gutkin-etal-2020-yoruba,
author = {Gutkin, Alexander and Demirşahin, Işın and
Kjartansson, Oddur and Rivera, Clara and Túbòsún, Kólá},
title = {Developing an Open-Source Corpus of {Y}oruba Speech},
booktitle = {Proceedings of Interspeech 2020},
pages = {404--408},
year = {2020},
address = {Shanghai, China},
publisher = {International Speech and Communication Association ({ISCA})},
doi = {10.21437/Interspeech.2020-1096},
}
Sikasote et al., 2023 — Zambezi Voice (ASR & Audio Only)
@inproceedings{sikasote-etal-2023-zambezi,
author = {Sikasote, Claytone and Siaminwe, Kalinda and Mwape, Stanly
and Zulu, Bangiwe and Phiri, Mofya and Phiri, Martin
and Zulu, David and Nyirenda, Mayumbo and Anastasopoulos, Antonios},
title = {{Zambezi Voice}: A Multilingual Speech Corpus for {Z}ambian Languages},
booktitle = {Proc. Interspeech 2023},
pages = {3984--3988},
year = {2023},
}
Van der Westhuizen & Niesler, 2018 — SO (Code-Switched)
@inproceedings{derwesthuizen-niesler-2018-soap,
author = {{Van der Westhuizen}, Ewald and Niesler, Thomas},
title = {A First {S}outh {A}frican Corpus of Multilingual Code-switched
Soap Opera Speech},
booktitle = {Proceedings of the Eleventh International Conference on
Language Resources and Evaluation ({LREC} 2018)},
year = {2018},
address = {Miyazaki, Japan},
publisher = {European Language Resources Association ({ELRA})},
}
Modipa et al., 2015 — SPCS (Code-Switched)
@inproceedings{modipa-etal-2015-spcs,
author = {Modipa, T. I. and Davel, M. H. and De Wet, F.},
title = {Implications of {S}epedi/{E}nglish Code Switching for {ASR} Systems},
booktitle = {Proceedings of the Pattern Recognition Association of
{S}outh {A}frica ({PRASA})},
pages = {112--117},
year = {2015},
}
### 📄 Original SLID Datasets Citation
Doumbouya et al., 2021 — Nicolingua (0003)
@inproceedings{doumbouya-etal-2021-nicolingua,
author = {Doumbouya, Moussa and Einstein, Lisa and Piech, Chris},
title = {Using Radio Archives for Low-Resource Speech Recognition:
Towards an Intelligent Virtual Assistant for Illiterate Users},
booktitle = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
volume = {35},
year = {2021},
}
The Brick House Cooperative, 2024 — OlongoAfrica
@misc{olongoafrica-2024,
author = {{The Brick House Cooperative}},
title = {{OlongoAfrica} Multilingual Anthology},
year = {2024},
howpublished = {\url{https://lingua.olongoafrica.com/}},
note = {A collection of translated and narrated short stories
in various African languages, including Edo, Tamazight,
Yoruba, Swahili, Hausa, Tiv, Shona, Ibibio, Igbo,
and Nigerian Pidgin},
}
UDHR Audio, 2025 — UDHR
@misc{udhr-audio-2025,
author = {{Universal Declaration of Human Rights Audio}},
title = {Universal Declaration of Human Rights Audio Project},
year = {2025},
howpublished = {\url{https://udhr.audio/}},
note = {Audio recordings of the Universal Declaration of Human
Rights in multiple languages},
}
Elmadany et al., 2025 — Voice of Africa (VOA)
@inproceedings{elmadany-etal-2025-simba,
author = {Elmadany, AbdelRahim and Kwon, Sang Yun and
Toyin, Hawau Olamide and Inciarte, Alcides Alcoba and
Aldarmaki, Hanan and Abdul-Mageed, Muhammad},
title = {Voice of a Continent: Mapping {A}frica's Speech
Technology Frontier},
booktitle = {Proceedings of the 2025 Conference on Empirical Methods
in Natural Language Processing},
pages = {11039--11061},
year = {2025},
publisher = {Association for Computational Linguistics},
url = {https://aclanthology.org/2025.emnlp-main.559},
}
Valk & Alumäe, 2021 — VoxLingua
@inproceedings{valk-alumae-2021-voxlingua,
author = {Valk, Jörgen and Alumäe, Tanel},
title = {{VoxLingua107}: A Dataset for Spoken Language Recognition},
booktitle = {Proceedings of the {IEEE} Spoken Language Technology
Workshop ({SLT})},
year = {2021},
}
Sikasote et al., 2023 — Zambezi Voice (Audio Only)
@inproceedings{sikasote-etal-2023-zambezi,
author = {Sikasote, Claytone and Siaminwe, Kalinda and Mwape, Stanly
and Zulu, Bangiwe and Phiri, Mofya and Phiri, Martin
and Zulu, David and Nyirenda, Mayumbo and Anastasopoulos, Antonios},
title = {{Zambezi Voice}: A Multilingual Speech Corpus for {Z}ambian Languages},
booktitle = {Proc. Interspeech 2023},
pages = {3984--3988},
year = {2023},
}
### 📄 Original TTS Datasets Citation
Meyer et al., 2022 — BibleTTS
@inproceedings{meyer-etal-2022-bibletts,
author = {Meyer, Josh and Adelani, David and Casanova, Edresson
and Öktem, Alp and Whitenack, Daniel and Weber, Julian
and {Kabongo Kabenamualu}, Salomon and Salesky, Elizabeth
and Orife, Iroro and Leong, Colin and Ogayo, Perez
and {Chinenye Emezue}, Chris and Mukiibi, Jonathan
and Osei, Salomey and Agbolo, Apelete and Akinode, Victor
and Opoku, Bernard and Samuel, Olanrewaju and Alabi, Jesujoba
and Muhammad, Shamsuddeen Hassan},
title = {{BibleTTS}: A Large, High-Fidelity, Multilingual, and
Uniquely {A}frican Speech Corpus},
booktitle = {Interspeech 2022},
pages = {2383--2387},
year = {2022},
address = {Incheon, Korea},
publisher = {ISCA},
doi = {10.21437/Interspeech.2022-10850},
}
van Niekerk et al., 2017 — High-Quality TTS (SA)
@inproceedings{vanniekerk-etal-2017-hqtts,
author = {van Niekerk, Daniel and van Heerden, Charl and Davel, Marelie
and Kleynhans, Neil and Kjartansson, Oddur and Jansche, Martin
and Ha, Linne},
title = {Rapid Development of {TTS} Corpora for Four {S}outh {A}frican Languages},
booktitle = {Interspeech 2017},
pages = {2178--2182},
year = {2017},
address = {Stockholm, Sweden},
publisher = {ISCA},
doi = {10.21437/Interspeech.2017-1139},
}
Digital Umuganda, 2023 — Kinyarwanda TTS
@misc{digitalumuganda-2023-kinyarwanda,
author = {{Digital Umuganda}},
title = {{AfriSpeech} {K}inyarwanda Male and Female {TTS} Datasets},
year = {2023},
howpublished = {\url{https://huggingface.co/datasets/DigitalUmuganda/afrispeak_kinyarwanda_male_tts_dataset}},
}