| | import datasets |
| | import json |
| | import jsonlines |
| |
|
| | logger = datasets.logging.get_logger(__name__) |
| |
|
# Relative paths (inside the dataset repository) to the raw data files.
_URL = "data/"
_URLS = {
    "train": _URL + "train.jsonl",
    "dev": _URL + "val.jsonl",
    "test": _URL + "test.jsonl",
    # Shared lookup: id -> linked source document (url/title/body); used by all splits.
    "html": _URL + "src_docs.json"
}
_VERSION = "0.0.1"
_DATASETNAME = "CroCoSum"
| |
|
class SolidotSumConfig(datasets.BuilderConfig):
    """BuilderConfig for SolidotSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for SolidotSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
| |
|
class SolidotSum(datasets.GeneratorBasedBuilder):
    """SolidotSum: Summarization dataset from Solidot.

    Each example is one Solidot post (id, url, title, body) together with the
    source documents ("links") the post references, looked up from a shared
    id -> document JSON file.
    """

    BUILDER_CONFIGS = [
        SolidotSumConfig(
            name="plain_text",
            version=datasets.Version(_VERSION, ""),
            description="Dataset for Chinese-English Crosslingual Code-switched Summarization",
        ),
    ]

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DATASETNAME,
            features=datasets.Features(
                {
                    "post_id": datasets.Value("string"),
                    "post_url": datasets.Value("string"),
                    "post_title": datasets.Value("string"),
                    "post_body": datasets.Value("string"),
                    # Zero or more linked source documents per post.
                    "links": datasets.features.Sequence(
                        {
                            "link_id": datasets.Value("string"),
                            "link_url": datasets.Value("string"),
                            "link_title": datasets.Value("string"),
                            "link_body": datasets.Value("string"),
                        }
                    ),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and the shared HTML lookup.

        Every split shares the same `html_path` (id -> source document map).
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        html_path = downloaded_files["html"]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"], "html_path": html_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"], "html_path": html_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"], "html_path": html_path},
            ),
        ]

    def _generate_examples(self, filepath, html_path):
        """Yield (key, example) pairs in the raw (text) form.

        Args:
            filepath: path to a JSONL file of posts for one split.
            html_path: path to the JSON map of link id -> source document.

        Raises:
            KeyError: if a post references a link id missing from `html_path`.
        """
        logger.info("generating examples from = %s", filepath)
        # Fix: the lookup file is JSON — decode it as UTF-8 explicitly instead
        # of relying on the platform default encoding.
        with open(html_path, encoding="utf-8") as f:
            html_lookup = json.load(f)
        with jsonlines.open(filepath) as reader:
            for key, post in enumerate(reader):
                # `post["links"]` is a list of link ids; ids may not be strings,
                # hence the str() coercion when indexing the lookup.
                link_ids = post["links"]
                link_urls = []
                link_titles = []
                link_bodies = []
                for link in link_ids:
                    doc = html_lookup[str(link)]
                    link_urls.append(doc["url"])
                    link_titles.append(doc["title"])
                    link_bodies.append(doc["body"])

                yield key, {
                    "post_id": post["post_id"],
                    "post_url": post["post_url"],
                    "post_title": post["post_title"],
                    "post_body": post["post_text"],
                    "links": {
                        "link_id": link_ids,
                        "link_url": link_urls,
                        "link_title": link_titles,
                        "link_body": link_bodies,
                    },
                }
| |
|
| |
|