| """HuggingFace loading script for the JamALT dataset.""" |
|
|
| import csv |
| import json |
| import os |
| from dataclasses import dataclass |
| from typing import Optional |
|
|
| import datasets |
|
|
# Version of the dataset release; bump when the published data changes.
_VERSION = "1.4.0"


# BibTeX for the Jam-ALT benchmark paper and the JamendoLyrics dataset
# (from which the Jam-ALT audio and timings originate).
_CITATION = """\
@misc{cifka-2023-jam-alt,
author = {Ond\v{r}ej C\'ifka and
Constantinos Dimitriou and
{Cheng-i} Wang and
Hendrik Schreiber and
Luke Miner and
Fabian-Robert St\"oter},
title = {{Jam-ALT}: A Formatting-Aware Lyrics Transcription Benchmark},
eprint = {arXiv:2311.13987},
year = 2023
}
@inproceedings{durand-2023-contrastive,
author={Durand, Simon and Stoller, Daniel and Ewert, Sebastian},
booktitle={2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
title={Contrastive Learning-Based Audio to Lyrics Alignment for Multiple Languages},
year={2023},
pages={1-5},
address={Rhodes Island, Greece},
doi={10.1109/ICASSP49357.2023.10096725}
}
"""


_DESCRIPTION = """\
Jam-ALT: A formatting-aware lyrics transcription benchmark.
"""


_HOMEPAGE = "https://audioshake.github.io/jam-alt"


# Name of the CSV file (in the dataset repository) listing every track
# with its Filepath, Language and LicenseType columns.
_METADATA_FILENAME = "metadata.csv"


# Maps the language names used in the metadata CSV to ISO 639-1 codes;
# the codes double as the per-language builder config names.
_LANGUAGE_NAME_TO_CODE = {
    "English": "en",
    "French": "fr",
    "German": "de",
    "Spanish": "es",
}
|
|
|
|
@dataclass
class JamAltBuilderConfig(datasets.BuilderConfig):
    """Builder configuration for the Jam-ALT dataset.

    Controls which language subset is loaded and how (and whether)
    the audio feature is exposed.
    """

    # ISO 639-1 code of the language subset to load; None loads all languages.
    language: Optional[str] = None
    # If False, the "audio" feature is omitted entirely (lyrics/alignment only).
    with_audio: bool = True
    # Passed to datasets.Audio(decode=...): if False, audio is not decoded.
    decode_audio: bool = True
    # Target sampling rate for decoded audio; None keeps each file's native rate.
    sampling_rate: Optional[int] = None
    # If True, decoded audio is downmixed to a single channel.
    mono: bool = True
|
|
|
|
class JamAltDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Jam-ALT lyrics transcription benchmark.

    Exposes a single ``test`` split with plain and tagged lyrics,
    line-level time alignments and (optionally) the audio.
    """

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIG_CLASS = JamAltBuilderConfig
    # An "all" config (the default) plus one config per language code.
    BUILDER_CONFIGS = [JamAltBuilderConfig("all")] + [
        JamAltBuilderConfig(lang, language=lang)
        for lang in _LANGUAGE_NAME_TO_CODE.values()
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the dataset metadata, including the feature schema.

        The "audio" feature is only added when the config requests it
        (``with_audio=True``).
        """
        feat_dict = {
            "name": datasets.Value("string"),
            "text": datasets.Value("string"),
            "text_tagged": datasets.Value("string"),
            # Line-level alignment: start/end times (seconds) with the
            # corresponding plain and tagged text of each lyric line.
            "lines": [
                {
                    "start": datasets.Value("float64"),
                    "end": datasets.Value("float64"),
                    "text": datasets.Value("string"),
                    "text_tagged": datasets.Value("string"),
                }
            ],
            "language": datasets.Value("string"),
            "license_type": datasets.Value("string"),
        }
        if self.config.with_audio:
            feat_dict["audio"] = datasets.Audio(
                decode=self.config.decode_audio,
                sampling_rate=self.config.sampling_rate,
                mono=self.config.mono,
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feat_dict),
            supervised_keys=("audio", "text") if "audio" in feat_dict else None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata and per-track files; define the test split.

        Rows are filtered by ``self.config.language`` when a per-language
        config is selected.
        """
        metadata_path = dl_manager.download(_METADATA_FILENAME)

        audio_paths, text_paths, text_tagged_paths, alignment_paths, metadata = (
            [],
            [],
            [],
            [],
            [],
        )

        with open(metadata_path, encoding="utf-8") as f:
            for row in csv.DictReader(f):
                # Skip tracks not in the selected language subset (if any).
                if (
                    self.config.language is not None
                    and _LANGUAGE_NAME_TO_CODE[row["Language"]]
                    != self.config.language
                ):
                    continue
                base = os.path.splitext(row["Filepath"])[0]
                audio_paths.append("audio/" + row["Filepath"])
                text_paths.append("lyrics/" + base + ".txt")
                text_tagged_paths.append("lyrics_tagged/" + base + ".txt")
                alignment_paths.append("alignment/" + base + ".json")
                metadata.append(row)

        text_paths = dl_manager.download(text_paths)
        # Bug fix: the tagged-lyrics files were previously never downloaded,
        # leaving bare relative repo paths that _generate_examples would
        # fail to open.
        text_tagged_paths = dl_manager.download(text_tagged_paths)
        alignment_paths = dl_manager.download(alignment_paths)
        audio_paths = (
            dl_manager.download(audio_paths) if self.config.with_audio else None
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs=dict(
                    text_paths=text_paths,
                    text_tagged_paths=text_tagged_paths,
                    alignment_paths=alignment_paths,
                    audio_paths=audio_paths,
                    metadata=metadata,
                ),
            ),
        ]

    def _generate_examples(
        self, text_paths, text_tagged_paths, alignment_paths, audio_paths, metadata
    ):
        """Yield ``(key, example)`` pairs, one per track.

        ``audio_paths`` is None when the builder was configured with
        ``with_audio=False``; the "audio" field is then omitted.
        """
        if audio_paths is None:
            audio_paths = [None] * len(text_paths)
        for text_path, text_tagged_path, alignment_path, audio_path, meta in zip(
            text_paths, text_tagged_paths, alignment_paths, audio_paths, metadata
        ):
            # The extension-less relative file path is a unique example key.
            name = os.path.splitext(meta["Filepath"])[0]
            with open(text_path, encoding="utf-8") as text_f:
                text = text_f.read()
            with open(text_tagged_path, encoding="utf-8") as text_tagged_f:
                text_tagged = text_tagged_f.read()
            with open(alignment_path, encoding="utf-8") as f:
                lines = json.load(f)["lines"]
            record = {
                "name": name,
                "text": text,
                "text_tagged": text_tagged,
                "lines": lines,
                "language": _LANGUAGE_NAME_TO_CODE[meta["Language"]],
                "license_type": meta["LicenseType"],
            }
            if audio_path is not None:
                record["audio"] = audio_path
            yield name, record
|
|